Diffstat (limited to 'src')
-rw-r--r--  src/Makefile.am                    7
-rw-r--r--  src/Makefile.test.include          1
-rw-r--r--  src/bench/gcs_filter.cpp          79
-rw-r--r--  src/blockfilter.cpp                8
-rw-r--r--  src/blockfilter.h                  6
-rw-r--r--  src/index/blockfilterindex.cpp    13
-rw-r--r--  src/index/blockfilterindex.h       2
-rw-r--r--  src/init.cpp                       2
-rw-r--r--  src/net.cpp                      231
-rw-r--r--  src/net.h                        123
-rw-r--r--  src/net_processing.cpp           463
-rw-r--r--  src/node/connection_types.cpp     26
-rw-r--r--  src/node/connection_types.h       82
-rw-r--r--  src/node/eviction.cpp            240
-rw-r--r--  src/node/eviction.h               69
-rw-r--r--  src/rpc/blockchain.cpp             6
-rw-r--r--  src/rpc/client.cpp                 1
-rw-r--r--  src/rpc/mempool.cpp              146
-rw-r--r--  src/test/fuzz/node_eviction.cpp    2
-rw-r--r--  src/test/fuzz/rpc.cpp              1
-rw-r--r--  src/test/fuzz/txorphan.cpp       143
-rw-r--r--  src/test/util/net.cpp              3
-rw-r--r--  src/test/util/net.h                1
-rw-r--r--  src/util/error.cpp                 2
-rw-r--r--  src/util/error.h                   1
-rw-r--r--  src/wallet/bdb.cpp                 6
-rw-r--r--  src/wallet/spend.cpp              11
-rw-r--r--  src/wallet/walletdb.cpp           10
28 files changed, 1111 insertions, 574 deletions
diff --git a/src/Makefile.am b/src/Makefile.am
index 488ff0e273..3fbbe180fc 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -139,6 +139,7 @@ BITCOIN_CORE_H = \
compat/cpuid.h \
compat/endian.h \
compressor.h \
+ node/connection_types.h \
consensus/consensus.h \
consensus/tx_check.h \
consensus/tx_verify.h \
@@ -148,6 +149,7 @@ BITCOIN_CORE_H = \
dbwrapper.h \
deploymentinfo.h \
deploymentstatus.h \
+ node/eviction.h \
external_signer.h \
flatfile.h \
fs.h \
@@ -373,7 +375,9 @@ libbitcoin_node_a_SOURCES = \
node/caches.cpp \
node/chainstate.cpp \
node/coin.cpp \
+ node/connection_types.cpp \
node/context.cpp \
+ node/eviction.cpp \
node/interfaces.cpp \
node/miner.cpp \
node/minisketchwrapper.cpp \
@@ -854,8 +858,7 @@ endif
# TODO: libbitcoinkernel is a work in progress consensus engine library, as more
# and more modules are decoupled from the consensus engine, this list will
-# shrink to only those which are absolutely necessary. For example, things
-# like index/*.cpp will be removed.
+# shrink to only those which are absolutely necessary.
libbitcoinkernel_la_SOURCES = \
kernel/bitcoinkernel.cpp \
arith_uint256.cpp \
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 098feacb3d..b806b62d5b 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -325,6 +325,7 @@ test_fuzz_fuzz_SOURCES = \
test/fuzz/tx_in.cpp \
test/fuzz/tx_out.cpp \
test/fuzz/tx_pool.cpp \
+ test/fuzz/txorphan.cpp \
test/fuzz/txrequest.cpp \
test/fuzz/utxo_snapshot.cpp \
test/fuzz/validation_load_mempool.cpp \
diff --git a/src/bench/gcs_filter.cpp b/src/bench/gcs_filter.cpp
index 607e4392b7..80babb213b 100644
--- a/src/bench/gcs_filter.cpp
+++ b/src/bench/gcs_filter.cpp
@@ -5,39 +5,84 @@
#include <bench/bench.h>
#include <blockfilter.h>
-static void ConstructGCSFilter(benchmark::Bench& bench)
+static const GCSFilter::ElementSet GenerateGCSTestElements()
{
GCSFilter::ElementSet elements;
- for (int i = 0; i < 10000; ++i) {
+
+ // Testing the benchmarks with different numbers of elements shows that a filter
+ // with at least 100,000 elements results in benchmarks that have the same
+ // ns/op. This makes it easy to reason about how long (in nanoseconds) a single
+ // filter element takes to process.
+ for (int i = 0; i < 100000; ++i) {
GCSFilter::Element element(32);
element[0] = static_cast<unsigned char>(i);
element[1] = static_cast<unsigned char>(i >> 8);
elements.insert(std::move(element));
}
+ return elements;
+}
+
+static void GCSBlockFilterGetHash(benchmark::Bench& bench)
+{
+ auto elements = GenerateGCSTestElements();
+
+ GCSFilter filter({0, 0, BASIC_FILTER_P, BASIC_FILTER_M}, elements);
+ BlockFilter block_filter(BlockFilterType::BASIC, {}, filter.GetEncoded(), /*skip_decode_check=*/false);
+
+ bench.run([&] {
+ block_filter.GetHash();
+ });
+}
+
+static void GCSFilterConstruct(benchmark::Bench& bench)
+{
+ auto elements = GenerateGCSTestElements();
+
uint64_t siphash_k0 = 0;
- bench.batch(elements.size()).unit("elem").run([&] {
- GCSFilter filter({siphash_k0, 0, 20, 1 << 20}, elements);
+ bench.run([&]{
+ GCSFilter filter({siphash_k0, 0, BASIC_FILTER_P, BASIC_FILTER_M}, elements);
siphash_k0++;
});
}
-static void MatchGCSFilter(benchmark::Bench& bench)
+static void GCSFilterDecode(benchmark::Bench& bench)
{
- GCSFilter::ElementSet elements;
- for (int i = 0; i < 10000; ++i) {
- GCSFilter::Element element(32);
- element[0] = static_cast<unsigned char>(i);
- element[1] = static_cast<unsigned char>(i >> 8);
- elements.insert(std::move(element));
- }
- GCSFilter filter({0, 0, 20, 1 << 20}, elements);
+ auto elements = GenerateGCSTestElements();
- bench.unit("elem").run([&] {
- filter.Match(GCSFilter::Element());
+ GCSFilter filter({0, 0, BASIC_FILTER_P, BASIC_FILTER_M}, elements);
+ auto encoded = filter.GetEncoded();
+
+ bench.run([&] {
+ GCSFilter filter({0, 0, BASIC_FILTER_P, BASIC_FILTER_M}, encoded, /*skip_decode_check=*/false);
});
}
-BENCHMARK(ConstructGCSFilter);
-BENCHMARK(MatchGCSFilter);
+static void GCSFilterDecodeSkipCheck(benchmark::Bench& bench)
+{
+ auto elements = GenerateGCSTestElements();
+
+ GCSFilter filter({0, 0, BASIC_FILTER_P, BASIC_FILTER_M}, elements);
+ auto encoded = filter.GetEncoded();
+
+ bench.run([&] {
+ GCSFilter filter({0, 0, BASIC_FILTER_P, BASIC_FILTER_M}, encoded, /*skip_decode_check=*/true);
+ });
+}
+
+static void GCSFilterMatch(benchmark::Bench& bench)
+{
+ auto elements = GenerateGCSTestElements();
+
+ GCSFilter filter({0, 0, BASIC_FILTER_P, BASIC_FILTER_M}, elements);
+
+ bench.run([&] {
+ filter.Match(GCSFilter::Element());
+ });
+}
+BENCHMARK(GCSBlockFilterGetHash);
+BENCHMARK(GCSFilterConstruct);
+BENCHMARK(GCSFilterDecode);
+BENCHMARK(GCSFilterDecodeSkipCheck);
+BENCHMARK(GCSFilterMatch);
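The reworked benchmarks hoist the expensive element-set generation into GenerateGCSTestElements() so that only the filter operation itself sits inside the timed lambda. A minimal standalone sketch of that setup-hoisting pattern, using plain std::chrono timing instead of the nanobench harness (MakeElements and the timed loop are illustrative stand-ins, not Bitcoin Core APIs):

// Sketch only: keep expensive setup outside the timed region, mirroring what
// GenerateGCSTestElements() does for the GCS filter benchmarks.
#include <chrono>
#include <cstddef>
#include <cstdio>
#include <set>
#include <utility>
#include <vector>

static std::set<std::vector<unsigned char>> MakeElements(int n)
{
    std::set<std::vector<unsigned char>> elements;
    for (int i = 0; i < n; ++i) {
        std::vector<unsigned char> e(32);
        e[0] = static_cast<unsigned char>(i);
        e[1] = static_cast<unsigned char>(i >> 8);
        elements.insert(std::move(e));
    }
    return elements;
}

int main()
{
    const auto elements = MakeElements(100000); // setup: not timed

    const auto start = std::chrono::steady_clock::now();
    // Timed region: stands in for filter construction / decode / match.
    std::size_t total = 0;
    for (const auto& e : elements) total += e.size();
    const auto stop = std::chrono::steady_clock::now();

    const auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    std::printf("%zu bytes touched in %lld ns (%.1f ns/elem)\n",
                total, static_cast<long long>(ns), double(ns) / double(elements.size()));
}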
diff --git a/src/blockfilter.cpp b/src/blockfilter.cpp
index 63a9ba498f..1ad6872143 100644
--- a/src/blockfilter.cpp
+++ b/src/blockfilter.cpp
@@ -47,7 +47,7 @@ GCSFilter::GCSFilter(const Params& params)
: m_params(params), m_N(0), m_F(0), m_encoded{0}
{}
-GCSFilter::GCSFilter(const Params& params, std::vector<unsigned char> encoded_filter)
+GCSFilter::GCSFilter(const Params& params, std::vector<unsigned char> encoded_filter, bool skip_decode_check)
: m_params(params), m_encoded(std::move(encoded_filter))
{
SpanReader stream{GCS_SER_TYPE, GCS_SER_VERSION, m_encoded};
@@ -59,6 +59,8 @@ GCSFilter::GCSFilter(const Params& params, std::vector<unsigned char> encoded_fi
}
m_F = static_cast<uint64_t>(m_N) * static_cast<uint64_t>(m_params.m_M);
+ if (skip_decode_check) return;
+
// Verify that the encoded filter contains exactly N elements. If it has too much or too little
// data, a std::ios_base::failure exception will be raised.
BitStreamReader<SpanReader> bitreader{stream};
@@ -219,14 +221,14 @@ static GCSFilter::ElementSet BasicFilterElements(const CBlock& block,
}
BlockFilter::BlockFilter(BlockFilterType filter_type, const uint256& block_hash,
- std::vector<unsigned char> filter)
+ std::vector<unsigned char> filter, bool skip_decode_check)
: m_filter_type(filter_type), m_block_hash(block_hash)
{
GCSFilter::Params params;
if (!BuildParams(params)) {
throw std::invalid_argument("unknown filter_type");
}
- m_filter = GCSFilter(params, std::move(filter));
+ m_filter = GCSFilter(params, std::move(filter), skip_decode_check);
}
BlockFilter::BlockFilter(BlockFilterType filter_type, const CBlock& block, const CBlockUndo& block_undo)
diff --git a/src/blockfilter.h b/src/blockfilter.h
index 96cefbf3b2..d6a51e95c2 100644
--- a/src/blockfilter.h
+++ b/src/blockfilter.h
@@ -59,7 +59,7 @@ public:
explicit GCSFilter(const Params& params = Params());
/** Reconstructs an already-created filter from an encoding. */
- GCSFilter(const Params& params, std::vector<unsigned char> encoded_filter);
+ GCSFilter(const Params& params, std::vector<unsigned char> encoded_filter, bool skip_decode_check);
/** Builds a new filter from the params and set of elements. */
GCSFilter(const Params& params, const ElementSet& elements);
@@ -122,7 +122,7 @@ public:
//! Reconstruct a BlockFilter from parts.
BlockFilter(BlockFilterType filter_type, const uint256& block_hash,
- std::vector<unsigned char> filter);
+ std::vector<unsigned char> filter, bool skip_decode_check);
//! Construct a new BlockFilter of the specified type from a block.
BlockFilter(BlockFilterType filter_type, const CBlock& block, const CBlockUndo& block_undo);
@@ -164,7 +164,7 @@ public:
if (!BuildParams(params)) {
throw std::ios_base::failure("unknown filter_type");
}
- m_filter = GCSFilter(params, std::move(encoded_filter));
+ m_filter = GCSFilter(params, std::move(encoded_filter), /*skip_decode_check=*/false);
}
};
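The new skip_decode_check flag lets callers that already trust the encoded bytes bypass the element-count verification pass. A rough sketch of the optional-validation pattern, using a hypothetical Payload type rather than GCSFilter:

// Sketch of the optional-validation pattern behind skip_decode_check: callers
// that have already verified the payload (e.g. against a stored checksum) can
// skip the costly decode pass. Payload is a stand-in, not a Bitcoin Core type.
#include <cstdint>
#include <stdexcept>
#include <utility>
#include <vector>

struct Payload {
    std::vector<uint8_t> encoded;
    uint64_t n{0};

    Payload(std::vector<uint8_t> bytes, uint64_t expected_n, bool skip_decode_check)
        : encoded(std::move(bytes)), n(expected_n)
    {
        if (skip_decode_check) return; // trusted source: skip the expensive pass

        // Untrusted source: confirm the encoding really contains exactly n
        // entries (placeholder for the real bit-stream decode loop).
        if (encoded.size() != n) {
            throw std::invalid_argument("encoded payload has wrong element count");
        }
    }
};

int main()
{
    std::vector<uint8_t> bytes(4, 0x00);
    Payload trusted(bytes, 4, /*skip_decode_check=*/true);   // no validation pass
    Payload checked(bytes, 4, /*skip_decode_check=*/false);  // validated on construction
    return trusted.n == checked.n ? 0 : 1;
}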
diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp
index c92b8c7e19..e7fad8eb64 100644
--- a/src/index/blockfilterindex.cpp
+++ b/src/index/blockfilterindex.cpp
@@ -5,6 +5,7 @@
#include <map>
#include <dbwrapper.h>
+#include <hash.h>
#include <index/blockfilterindex.h>
#include <node/blockstorage.h>
#include <util/system.h>
@@ -143,18 +144,22 @@ bool BlockFilterIndex::CommitInternal(CDBBatch& batch)
return BaseIndex::CommitInternal(batch);
}
-bool BlockFilterIndex::ReadFilterFromDisk(const FlatFilePos& pos, BlockFilter& filter) const
+bool BlockFilterIndex::ReadFilterFromDisk(const FlatFilePos& pos, const uint256& hash, BlockFilter& filter) const
{
CAutoFile filein(m_filter_fileseq->Open(pos, true), SER_DISK, CLIENT_VERSION);
if (filein.IsNull()) {
return false;
}
+ // Check that the hash of the encoded_filter matches the one stored in the db.
uint256 block_hash;
std::vector<uint8_t> encoded_filter;
try {
filein >> block_hash >> encoded_filter;
- filter = BlockFilter(GetFilterType(), block_hash, std::move(encoded_filter));
+ uint256 result;
+ CHash256().Write(encoded_filter).Finalize(result);
+ if (result != hash) return error("Checksum mismatch in filter decode.");
+ filter = BlockFilter(GetFilterType(), block_hash, std::move(encoded_filter), /*skip_decode_check=*/true);
}
catch (const std::exception& e) {
return error("%s: Failed to deserialize block filter from disk: %s", __func__, e.what());
@@ -381,7 +386,7 @@ bool BlockFilterIndex::LookupFilter(const CBlockIndex* block_index, BlockFilter&
return false;
}
- return ReadFilterFromDisk(entry.pos, filter_out);
+ return ReadFilterFromDisk(entry.pos, entry.hash, filter_out);
}
bool BlockFilterIndex::LookupFilterHeader(const CBlockIndex* block_index, uint256& header_out)
@@ -425,7 +430,7 @@ bool BlockFilterIndex::LookupFilterRange(int start_height, const CBlockIndex* st
filters_out.resize(entries.size());
auto filter_pos_it = filters_out.begin();
for (const auto& entry : entries) {
- if (!ReadFilterFromDisk(entry.pos, *filter_pos_it)) {
+ if (!ReadFilterFromDisk(entry.pos, entry.hash, *filter_pos_it)) {
return false;
}
++filter_pos_it;
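ReadFilterFromDisk() now recomputes the hash of the encoded filter and compares it with the digest stored in the index database, and only then constructs the BlockFilter with skip_decode_check=true: integrity comes from the checksum, so the decode pass would be redundant. A standalone sketch of that check-then-trust flow, with FNV-1a standing in for the real double-SHA256 (CHash256):

// Sketch only: recompute a digest over bytes read from disk and compare it to
// the digest recorded at write time before trusting them.
#include <cstdint>
#include <cstdio>
#include <vector>

static uint64_t Fnv1a(const std::vector<uint8_t>& data)
{
    uint64_t h = 0xcbf29ce484222325ull;
    for (uint8_t b : data) {
        h ^= b;
        h *= 0x100000001b3ull;
    }
    return h;
}

static bool LoadTrusted(const std::vector<uint8_t>& from_disk, uint64_t expected_digest)
{
    if (Fnv1a(from_disk) != expected_digest) {
        std::fprintf(stderr, "checksum mismatch, refusing to use filter\n");
        return false;
    }
    // The digest matches what was committed when the filter was written, so
    // the redundant decode check can be skipped (skip_decode_check=true).
    return true;
}

int main()
{
    const std::vector<uint8_t> bytes{1, 2, 3, 4};
    const uint64_t digest = Fnv1a(bytes); // stored alongside the data at write time
    std::printf("ok=%d\n", LoadTrusted(bytes, digest) ? 1 : 0);
}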
diff --git a/src/index/blockfilterindex.h b/src/index/blockfilterindex.h
index 6deff59000..fef8b573e8 100644
--- a/src/index/blockfilterindex.h
+++ b/src/index/blockfilterindex.h
@@ -31,7 +31,7 @@ private:
FlatFilePos m_next_filter_pos;
std::unique_ptr<FlatFileSeq> m_filter_fileseq;
- bool ReadFilterFromDisk(const FlatFilePos& pos, BlockFilter& filter) const;
+ bool ReadFilterFromDisk(const FlatFilePos& pos, const uint256& hash, BlockFilter& filter) const;
size_t WriteFilterToDisk(FlatFilePos& pos, const BlockFilter& filter);
Mutex m_cs_headers_cache;
diff --git a/src/init.cpp b/src/init.cpp
index e17728373b..eff37e1a83 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -967,7 +967,7 @@ bool AppInitParameterInteraction(const ArgsManager& args, bool use_syscall_sandb
peer_connect_timeout = args.GetIntArg("-peertimeout", DEFAULT_PEER_CONNECT_TIMEOUT);
if (peer_connect_timeout <= 0) {
- return InitError(Untranslated("peertimeout cannot be configured with a negative value."));
+ return InitError(Untranslated("peertimeout must be a positive integer."));
}
if (args.IsArgSet("-minrelaytxfee")) {
diff --git a/src/net.cpp b/src/net.cpp
index 7f4e571c8d..c37d90519c 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -16,6 +16,7 @@
#include <compat.h>
#include <consensus/consensus.h>
#include <crypto/sha256.h>
+#include <node/eviction.h>
#include <fs.h>
#include <i2p.h>
#include <net_permissions.h>
@@ -576,26 +577,6 @@ void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNet
}
}
-std::string ConnectionTypeAsString(ConnectionType conn_type)
-{
- switch (conn_type) {
- case ConnectionType::INBOUND:
- return "inbound";
- case ConnectionType::MANUAL:
- return "manual";
- case ConnectionType::FEELER:
- return "feeler";
- case ConnectionType::OUTBOUND_FULL_RELAY:
- return "outbound-full-relay";
- case ConnectionType::BLOCK_RELAY:
- return "block-relay-only";
- case ConnectionType::ADDR_FETCH:
- return "addr-fetch";
- } // no default case, so the compiler can warn about missing cases
-
- assert(false);
-}
-
CService CNode::GetAddrLocal() const
{
AssertLockNotHeld(m_addr_local_mutex);
@@ -877,210 +858,6 @@ size_t CConnman::SocketSendData(CNode& node) const
return nSentSize;
}
-static bool ReverseCompareNodeMinPingTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
-{
- return a.m_min_ping_time > b.m_min_ping_time;
-}
-
-static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
-{
- return a.m_connected > b.m_connected;
-}
-
-static bool CompareNetGroupKeyed(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) {
- return a.nKeyedNetGroup < b.nKeyedNetGroup;
-}
-
-static bool CompareNodeBlockTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
-{
- // There is a fall-through here because it is common for a node to have many peers which have not yet relayed a block.
- if (a.m_last_block_time != b.m_last_block_time) return a.m_last_block_time < b.m_last_block_time;
- if (a.fRelevantServices != b.fRelevantServices) return b.fRelevantServices;
- return a.m_connected > b.m_connected;
-}
-
-static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
-{
- // There is a fall-through here because it is common for a node to have more than a few peers that have not yet relayed txn.
- if (a.m_last_tx_time != b.m_last_tx_time) return a.m_last_tx_time < b.m_last_tx_time;
- if (a.m_relay_txs != b.m_relay_txs) return b.m_relay_txs;
- if (a.fBloomFilter != b.fBloomFilter) return a.fBloomFilter;
- return a.m_connected > b.m_connected;
-}
-
-// Pick out the potential block-relay only peers, and sort them by last block time.
-static bool CompareNodeBlockRelayOnlyTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
-{
- if (a.m_relay_txs != b.m_relay_txs) return a.m_relay_txs;
- if (a.m_last_block_time != b.m_last_block_time) return a.m_last_block_time < b.m_last_block_time;
- if (a.fRelevantServices != b.fRelevantServices) return b.fRelevantServices;
- return a.m_connected > b.m_connected;
-}
-
-/**
- * Sort eviction candidates by network/localhost and connection uptime.
- * Candidates near the beginning are more likely to be evicted, and those
- * near the end are more likely to be protected, e.g. less likely to be evicted.
- * - First, nodes that are not `is_local` and that do not belong to `network`,
- * sorted by increasing uptime (from most recently connected to connected longer).
- * - Then, nodes that are `is_local` or belong to `network`, sorted by increasing uptime.
- */
-struct CompareNodeNetworkTime {
- const bool m_is_local;
- const Network m_network;
- CompareNodeNetworkTime(bool is_local, Network network) : m_is_local(is_local), m_network(network) {}
- bool operator()(const NodeEvictionCandidate& a, const NodeEvictionCandidate& b) const
- {
- if (m_is_local && a.m_is_local != b.m_is_local) return b.m_is_local;
- if ((a.m_network == m_network) != (b.m_network == m_network)) return b.m_network == m_network;
- return a.m_connected > b.m_connected;
- };
-};
-
-//! Sort an array by the specified comparator, then erase the last K elements where predicate is true.
-template <typename T, typename Comparator>
-static void EraseLastKElements(
- std::vector<T>& elements, Comparator comparator, size_t k,
- std::function<bool(const NodeEvictionCandidate&)> predicate = [](const NodeEvictionCandidate& n) { return true; })
-{
- std::sort(elements.begin(), elements.end(), comparator);
- size_t eraseSize = std::min(k, elements.size());
- elements.erase(std::remove_if(elements.end() - eraseSize, elements.end(), predicate), elements.end());
-}
-
-void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& eviction_candidates)
-{
- // Protect the half of the remaining nodes which have been connected the longest.
- // This replicates the non-eviction implicit behavior, and precludes attacks that start later.
- // To favorise the diversity of our peer connections, reserve up to half of these protected
- // spots for Tor/onion, localhost, I2P, and CJDNS peers, even if they're not longest uptime
- // overall. This helps protect these higher-latency peers that tend to be otherwise
- // disadvantaged under our eviction criteria.
- const size_t initial_size = eviction_candidates.size();
- const size_t total_protect_size{initial_size / 2};
-
- // Disadvantaged networks to protect. In the case of equal counts, earlier array members
- // have the first opportunity to recover unused slots from the previous iteration.
- struct Net { bool is_local; Network id; size_t count; };
- std::array<Net, 4> networks{
- {{false, NET_CJDNS, 0}, {false, NET_I2P, 0}, {/*localhost=*/true, NET_MAX, 0}, {false, NET_ONION, 0}}};
-
- // Count and store the number of eviction candidates per network.
- for (Net& n : networks) {
- n.count = std::count_if(eviction_candidates.cbegin(), eviction_candidates.cend(),
- [&n](const NodeEvictionCandidate& c) {
- return n.is_local ? c.m_is_local : c.m_network == n.id;
- });
- }
- // Sort `networks` by ascending candidate count, to give networks having fewer candidates
- // the first opportunity to recover unused protected slots from the previous iteration.
- std::stable_sort(networks.begin(), networks.end(), [](Net a, Net b) { return a.count < b.count; });
-
- // Protect up to 25% of the eviction candidates by disadvantaged network.
- const size_t max_protect_by_network{total_protect_size / 2};
- size_t num_protected{0};
-
- while (num_protected < max_protect_by_network) {
- // Count the number of disadvantaged networks from which we have peers to protect.
- auto num_networks = std::count_if(networks.begin(), networks.end(), [](const Net& n) { return n.count; });
- if (num_networks == 0) {
- break;
- }
- const size_t disadvantaged_to_protect{max_protect_by_network - num_protected};
- const size_t protect_per_network{std::max(disadvantaged_to_protect / num_networks, static_cast<size_t>(1))};
- // Early exit flag if there are no remaining candidates by disadvantaged network.
- bool protected_at_least_one{false};
-
- for (Net& n : networks) {
- if (n.count == 0) continue;
- const size_t before = eviction_candidates.size();
- EraseLastKElements(eviction_candidates, CompareNodeNetworkTime(n.is_local, n.id),
- protect_per_network, [&n](const NodeEvictionCandidate& c) {
- return n.is_local ? c.m_is_local : c.m_network == n.id;
- });
- const size_t after = eviction_candidates.size();
- if (before > after) {
- protected_at_least_one = true;
- const size_t delta{before - after};
- num_protected += delta;
- if (num_protected >= max_protect_by_network) {
- break;
- }
- n.count -= delta;
- }
- }
- if (!protected_at_least_one) {
- break;
- }
- }
-
- // Calculate how many we removed, and update our total number of peers that
- // we want to protect based on uptime accordingly.
- assert(num_protected == initial_size - eviction_candidates.size());
- const size_t remaining_to_protect{total_protect_size - num_protected};
- EraseLastKElements(eviction_candidates, ReverseCompareNodeTimeConnected, remaining_to_protect);
-}
-
-[[nodiscard]] std::optional<NodeId> SelectNodeToEvict(std::vector<NodeEvictionCandidate>&& vEvictionCandidates)
-{
- // Protect connections with certain characteristics
-
- // Deterministically select 4 peers to protect by netgroup.
- // An attacker cannot predict which netgroups will be protected
- EraseLastKElements(vEvictionCandidates, CompareNetGroupKeyed, 4);
- // Protect the 8 nodes with the lowest minimum ping time.
- // An attacker cannot manipulate this metric without physically moving nodes closer to the target.
- EraseLastKElements(vEvictionCandidates, ReverseCompareNodeMinPingTime, 8);
- // Protect 4 nodes that most recently sent us novel transactions accepted into our mempool.
- // An attacker cannot manipulate this metric without performing useful work.
- EraseLastKElements(vEvictionCandidates, CompareNodeTXTime, 4);
- // Protect up to 8 non-tx-relay peers that have sent us novel blocks.
- EraseLastKElements(vEvictionCandidates, CompareNodeBlockRelayOnlyTime, 8,
- [](const NodeEvictionCandidate& n) { return !n.m_relay_txs && n.fRelevantServices; });
-
- // Protect 4 nodes that most recently sent us novel blocks.
- // An attacker cannot manipulate this metric without performing useful work.
- EraseLastKElements(vEvictionCandidates, CompareNodeBlockTime, 4);
-
- // Protect some of the remaining eviction candidates by ratios of desirable
- // or disadvantaged characteristics.
- ProtectEvictionCandidatesByRatio(vEvictionCandidates);
-
- if (vEvictionCandidates.empty()) return std::nullopt;
-
- // If any remaining peers are preferred for eviction consider only them.
- // This happens after the other preferences since if a peer is really the best by other criteria (esp relaying blocks)
- // then we probably don't want to evict it no matter what.
- if (std::any_of(vEvictionCandidates.begin(),vEvictionCandidates.end(),[](NodeEvictionCandidate const &n){return n.prefer_evict;})) {
- vEvictionCandidates.erase(std::remove_if(vEvictionCandidates.begin(),vEvictionCandidates.end(),
- [](NodeEvictionCandidate const &n){return !n.prefer_evict;}),vEvictionCandidates.end());
- }
-
- // Identify the network group with the most connections and youngest member.
- // (vEvictionCandidates is already sorted by reverse connect time)
- uint64_t naMostConnections;
- unsigned int nMostConnections = 0;
- std::chrono::seconds nMostConnectionsTime{0};
- std::map<uint64_t, std::vector<NodeEvictionCandidate> > mapNetGroupNodes;
- for (const NodeEvictionCandidate &node : vEvictionCandidates) {
- std::vector<NodeEvictionCandidate> &group = mapNetGroupNodes[node.nKeyedNetGroup];
- group.push_back(node);
- const auto grouptime{group[0].m_connected};
-
- if (group.size() > nMostConnections || (group.size() == nMostConnections && grouptime > nMostConnectionsTime)) {
- nMostConnections = group.size();
- nMostConnectionsTime = grouptime;
- naMostConnections = node.nKeyedNetGroup;
- }
- }
-
- // Reduce to the network group with the most connections
- vEvictionCandidates = std::move(mapNetGroupNodes[naMostConnections]);
-
- // Disconnect from the network group with the most connections
- return vEvictionCandidates.front().id;
-}
-
/** Try to find a connection to evict when the node is full.
* Extreme care must be taken to avoid opening the node to attacker
* triggered network partitioning.
@@ -1096,10 +873,6 @@ bool CConnman::AttemptToEvictConnection()
LOCK(m_nodes_mutex);
for (const CNode* node : m_nodes) {
- if (node->HasPermission(NetPermissionFlags::NoBan))
- continue;
- if (!node->IsInboundConn())
- continue;
if (node->fDisconnect)
continue;
NodeEvictionCandidate candidate{
@@ -1115,6 +888,8 @@ bool CConnman::AttemptToEvictConnection()
Desig(prefer_evict) node->m_prefer_evict,
Desig(m_is_local) node->addr.IsLocal(),
Desig(m_network) node->ConnectedThroughNetwork(),
+ Desig(m_noban) node->HasPermission(NetPermissionFlags::NoBan),
+ Desig(m_conn_type) node->m_conn_type,
};
vEvictionCandidates.push_back(candidate);
}
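The eviction helpers removed from net.cpp above now live in src/node/eviction.cpp. Their recurring trick is to sort the candidate list by one criterion and then erase (protect) up to K entries from the tail that match a predicate. A trimmed-down sketch of that EraseLastKElements() pattern, with Candidate standing in for NodeEvictionCandidate:

// Sketch of the sort-then-erase-last-K protection pattern used by the eviction
// code this hunk moves into src/node/eviction.cpp.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <functional>
#include <vector>

struct Candidate {
    int id;
    long min_ping_usec;
};

// Sort by comparator, then drop (protect) up to k tail elements that satisfy
// the predicate, mirroring EraseLastKElements() in the real code.
template <typename Comparator>
void EraseLastKElements(std::vector<Candidate>& v, Comparator cmp, std::size_t k,
                        std::function<bool(const Candidate&)> pred = [](const Candidate&) { return true; })
{
    std::sort(v.begin(), v.end(), cmp);
    const std::size_t erase_size = std::min(k, v.size());
    v.erase(std::remove_if(v.end() - static_cast<std::ptrdiff_t>(erase_size), v.end(), pred), v.end());
}

int main()
{
    std::vector<Candidate> candidates{{1, 900}, {2, 50}, {3, 300}, {4, 20}};
    // Protect the two candidates with the lowest ping: sort descending so they
    // land at the tail, then erase them from the eviction pool.
    EraseLastKElements(candidates, [](const Candidate& a, const Candidate& b) {
        return a.min_ping_usec > b.min_ping_usec;
    }, 2);
    for (const auto& c : candidates) std::printf("still evictable: id=%d\n", c.id);
}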
diff --git a/src/net.h b/src/net.h
index bf169649c0..6453ad1dc7 100644
--- a/src/net.h
+++ b/src/net.h
@@ -9,6 +9,7 @@
#include <chainparams.h>
#include <common/bloom.h>
#include <compat.h>
+#include <node/connection_types.h>
#include <consensus/amount.h>
#include <crypto/siphash.h>
#include <hash.h>
@@ -121,78 +122,6 @@ struct CSerializedNetMsg {
std::string m_type;
};
-/** Different types of connections to a peer. This enum encapsulates the
- * information we have available at the time of opening or accepting the
- * connection. Aside from INBOUND, all types are initiated by us.
- *
- * If adding or removing types, please update CONNECTION_TYPE_DOC in
- * src/rpc/net.cpp and src/qt/rpcconsole.cpp, as well as the descriptions in
- * src/qt/guiutil.cpp and src/bitcoin-cli.cpp::NetinfoRequestHandler. */
-enum class ConnectionType {
- /**
- * Inbound connections are those initiated by a peer. This is the only
- * property we know at the time of connection, until P2P messages are
- * exchanged.
- */
- INBOUND,
-
- /**
- * These are the default connections that we use to connect with the
- * network. There is no restriction on what is relayed; by default we relay
- * blocks, addresses & transactions. We automatically attempt to open
- * MAX_OUTBOUND_FULL_RELAY_CONNECTIONS using addresses from our AddrMan.
- */
- OUTBOUND_FULL_RELAY,
-
-
- /**
- * We open manual connections to addresses that users explicitly requested
- * via the addnode RPC or the -addnode/-connect configuration options. Even if a
- * manual connection is misbehaving, we do not automatically disconnect or
- * add it to our discouragement filter.
- */
- MANUAL,
-
- /**
- * Feeler connections are short-lived connections made to check that a node
- * is alive. They can be useful for:
- * - test-before-evict: if one of the peers is considered for eviction from
- * our AddrMan because another peer is mapped to the same slot in the tried table,
- * evict only if this longer-known peer is offline.
- * - move node addresses from New to Tried table, so that we have more
- * connectable addresses in our AddrMan.
- * Note that in the literature ("Eclipse Attacks on Bitcoin’s Peer-to-Peer Network")
- * only the latter feature is referred to as "feeler connections",
- * although in our codebase feeler connections encompass test-before-evict as well.
- * We make these connections approximately every FEELER_INTERVAL:
- * first we resolve previously found collisions if they exist (test-before-evict),
- * otherwise we connect to a node from the new table.
- */
- FEELER,
-
- /**
- * We use block-relay-only connections to help prevent against partition
- * attacks. By not relaying transactions or addresses, these connections
- * are harder to detect by a third party, thus helping obfuscate the
- * network topology. We automatically attempt to open
- * MAX_BLOCK_RELAY_ONLY_ANCHORS using addresses from our anchors.dat. Then
- * addresses from our AddrMan if MAX_BLOCK_RELAY_ONLY_CONNECTIONS
- * isn't reached yet.
- */
- BLOCK_RELAY,
-
- /**
- * AddrFetch connections are short lived connections used to solicit
- * addresses from peers. These are initiated to addresses submitted via the
- * -seednode command line argument, or under certain conditions when the
- * AddrMan is empty.
- */
- ADDR_FETCH,
-};
-
-/** Convert ConnectionType enum to a string value */
-std::string ConnectionTypeAsString(ConnectionType conn_type);
-
/**
* Look up IP addresses from all interfaces on the machine and add them to the
* list of local addresses to self-advertise.
@@ -1247,54 +1176,4 @@ extern std::function<void(const CAddress& addr,
bool is_incoming)>
CaptureMessage;
-struct NodeEvictionCandidate
-{
- NodeId id;
- std::chrono::seconds m_connected;
- std::chrono::microseconds m_min_ping_time;
- std::chrono::seconds m_last_block_time;
- std::chrono::seconds m_last_tx_time;
- bool fRelevantServices;
- bool m_relay_txs;
- bool fBloomFilter;
- uint64_t nKeyedNetGroup;
- bool prefer_evict;
- bool m_is_local;
- Network m_network;
-};
-
-/**
- * Select an inbound peer to evict after filtering out (protecting) peers having
- * distinct, difficult-to-forge characteristics. The protection logic picks out
- * fixed numbers of desirable peers per various criteria, followed by (mostly)
- * ratios of desirable or disadvantaged peers. If any eviction candidates
- * remain, the selection logic chooses a peer to evict.
- */
-[[nodiscard]] std::optional<NodeId> SelectNodeToEvict(std::vector<NodeEvictionCandidate>&& vEvictionCandidates);
-
-/** Protect desirable or disadvantaged inbound peers from eviction by ratio.
- *
- * This function protects half of the peers which have been connected the
- * longest, to replicate the non-eviction implicit behavior and preclude attacks
- * that start later.
- *
- * Half of these protected spots (1/4 of the total) are reserved for the
- * following categories of peers, sorted by longest uptime, even if they're not
- * longest uptime overall:
- *
- * - onion peers connected via our tor control service
- *
- * - localhost peers, as manually configured hidden services not using
- * `-bind=addr[:port]=onion` will not be detected as inbound onion connections
- *
- * - I2P peers
- *
- * - CJDNS peers
- *
- * This helps protect these privacy network peers, which tend to be otherwise
- * disadvantaged under our eviction criteria for their higher min ping times
- * relative to IPv4/IPv6 peers, and favorise the diversity of peer connections.
- */
-void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& vEvictionCandidates);
-
#endif // BITCOIN_NET_H
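The NodeEvictionCandidate struct and the SelectNodeToEvict() declaration removed here move to src/node/eviction.h. The final selection step, grouping the remaining candidates by keyed netgroup and evicting the youngest member of the largest group, can be sketched as follows (fields and tie-breaking are simplified for illustration; this is not the moved code):

// Sketch of the final "evict from the largest netgroup" step performed by
// SelectNodeToEvict(), now declared in src/node/eviction.h.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <map>
#include <optional>
#include <vector>

struct Candidate {
    int id;
    uint64_t keyed_netgroup;
    long uptime_seconds; // smaller means more recently connected (younger)
};

// Candidates are assumed to be sorted youngest-first, as the real code sorts
// by reverse connect time before this step.
std::optional<int> SelectFromLargestNetgroup(const std::vector<Candidate>& candidates)
{
    if (candidates.empty()) return std::nullopt;

    std::map<uint64_t, std::vector<Candidate>> groups;
    uint64_t best_group{0};
    std::size_t best_size{0};
    long best_first_uptime{0};

    for (const Candidate& c : candidates) {
        auto& group = groups[c.keyed_netgroup];
        group.push_back(c);
        const long first_uptime = group.front().uptime_seconds;
        // Prefer the biggest group; on a tie, the one whose youngest member is youngest.
        if (group.size() > best_size ||
            (group.size() == best_size && first_uptime < best_first_uptime)) {
            best_size = group.size();
            best_first_uptime = first_uptime;
            best_group = c.keyed_netgroup;
        }
    }
    // Evict the most recently connected member of the largest group.
    return groups[best_group].front().id;
}

int main()
{
    // Peers 1 and 3 share netgroup 7, so its youngest member is selected.
    const std::vector<Candidate> v{{1, 7, 10}, {2, 9, 500}, {3, 7, 800}};
    if (const auto id = SelectFromLargestNetgroup(v)) std::printf("evict peer %d\n", *id);
}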
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index db711be130..c33dd29923 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -61,6 +61,8 @@ static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
* Timeout = base + per_header * (expected number of headers) */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
+/** How long to wait for a peer to respond to a getheaders request */
+static constexpr auto HEADERS_RESPONSE_TIME{2min};
/** Protect at least this many outbound peers from disconnection due to slow/
* behind headers chain.
*/
@@ -355,6 +357,9 @@ struct Peer {
/** Work queue of items requested by this peer **/
std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
+ /** Time of the last getheaders message to this peer */
+ std::atomic<NodeClock::time_point> m_last_getheaders_timestamp{NodeSeconds{}};
+
Peer(NodeId id)
: m_id{id}
{}
@@ -501,7 +506,7 @@ public:
private:
/** Consider evicting an outbound peer based on the amount of time they've been behind our tip */
- void ConsiderEviction(CNode& pto, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */
void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -560,6 +565,22 @@ private:
const std::vector<CBlockHeader>& headers,
bool via_compact_block)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
+ /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */
+ /** Deal with state tracking and headers sync for peers that send the
+ * occasional non-connecting header (this can happen due to BIP 130 headers
+ * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */
+ void HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers);
+ /** Return true if the headers connect to each other, false otherwise */
+ bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
+ /** Request further headers from this peer with a given locator.
+ * We don't issue a getheaders message if we have a recent one outstanding.
+ * This returns true if a getheaders is actually sent, and false otherwise.
+ */
+ bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer);
+ /** Potentially fetch blocks from this peer upon receipt of a new headers tip */
+ void HeadersDirectFetchBlocks(CNode& pfrom, const CBlockIndex* pindexLast);
+ /** Update peer state based on received headers message */
+ void UpdatePeerStateForReceivedHeaders(CNode& pfrom, const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers);
void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);
@@ -2194,6 +2215,203 @@ void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlo
m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCKTXN, resp));
}
+/**
+ * Special handling for unconnecting headers that might be part of a block
+ * announcement.
+ *
+ * We'll send a getheaders message in response to try to connect the chain.
+ *
+ * The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
+ * don't connect before being given DoS points.
+ *
+ * Once a headers message is received that is valid and does connect,
+ * nUnconnectingHeaders gets reset back to 0.
+ */
+void PeerManagerImpl::HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer,
+ const std::vector<CBlockHeader>& headers)
+{
+ const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
+
+ LOCK(cs_main);
+ CNodeState *nodestate = State(pfrom.GetId());
+
+ nodestate->nUnconnectingHeaders++;
+ // Try to fill in the missing headers.
+ if (MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), peer)) {
+ LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
+ headers[0].GetHash().ToString(),
+ headers[0].hashPrevBlock.ToString(),
+ m_chainman.m_best_header->nHeight,
+ pfrom.GetId(), nodestate->nUnconnectingHeaders);
+ }
+ // Set hashLastUnknownBlock for this peer, so that if we
+ // eventually get the headers - even from a different peer -
+ // we can use this peer to download.
+ UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
+
+ // The peer may just be broken, so periodically assign DoS points if this
+ // condition persists.
+ if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
+ Misbehaving(peer, 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders));
+ }
+}
+
+bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const
+{
+ uint256 hashLastBlock;
+ for (const CBlockHeader& header : headers) {
+ if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
+ return false;
+ }
+ hashLastBlock = header.GetHash();
+ }
+ return true;
+}
+
+bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer)
+{
+ const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
+
+ const auto current_time = NodeClock::now();
+
+ // Only allow a new getheaders message to go out if we don't have a recent
+ // one already in-flight
+ if (current_time - peer.m_last_getheaders_timestamp.load() > HEADERS_RESPONSE_TIME) {
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, locator, uint256()));
+ peer.m_last_getheaders_timestamp = current_time;
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Given a new headers tip ending in pindexLast, potentially request blocks towards that tip.
+ * We require that the given tip have at least as much work as our tip, and for
+ * our current tip to be "close to synced" (see CanDirectFetch()).
+ */
+void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const CBlockIndex* pindexLast)
+{
+ const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
+
+ LOCK(cs_main);
+ CNodeState *nodestate = State(pfrom.GetId());
+
+ if (CanDirectFetch() && pindexLast->IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) {
+
+ std::vector<const CBlockIndex*> vToFetch;
+ const CBlockIndex *pindexWalk = pindexLast;
+ // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
+ while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
+ !IsBlockRequested(pindexWalk->GetBlockHash()) &&
+ (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || State(pfrom.GetId())->fHaveWitness)) {
+ // We don't have this block, and it's not yet in flight.
+ vToFetch.push_back(pindexWalk);
+ }
+ pindexWalk = pindexWalk->pprev;
+ }
+ // If pindexWalk still isn't on our main chain, we're looking at a
+ // very large reorg at a time we think we're close to caught up to
+ // the main chain -- this shouldn't really happen. Bail out on the
+ // direct fetch and rely on parallel download instead.
+ if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
+ LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
+ pindexLast->GetBlockHash().ToString(),
+ pindexLast->nHeight);
+ } else {
+ std::vector<CInv> vGetData;
+ // Download as much as possible, from earliest to latest.
+ for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
+ if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ // Can't download any more from this peer
+ break;
+ }
+ uint32_t nFetchFlags = GetFetchFlags(pfrom);
+ vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
+ BlockRequested(pfrom.GetId(), *pindex);
+ LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
+ pindex->GetBlockHash().ToString(), pfrom.GetId());
+ }
+ if (vGetData.size() > 1) {
+ LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
+ pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
+ }
+ if (vGetData.size() > 0) {
+ if (!m_ignore_incoming_txs &&
+ nodestate->m_provides_cmpctblocks &&
+ vGetData.size() == 1 &&
+ mapBlocksInFlight.size() == 1 &&
+ pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
+ // In any case, we want to download using a compact block, not a regular one
+ vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
+ }
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ }
+ }
+ }
+}
+
+/**
+ * Given receipt of headers from a peer ending in pindexLast, along with
+ * whether that header was new and whether the headers message was full,
+ * update the state we keep for the peer.
+ */
+void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom,
+ const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers)
+{
+ LOCK(cs_main);
+ CNodeState *nodestate = State(pfrom.GetId());
+ if (nodestate->nUnconnectingHeaders > 0) {
+ LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders);
+ }
+ nodestate->nUnconnectingHeaders = 0;
+
+ assert(pindexLast);
+ UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
+
+ // From here, pindexBestKnownBlock should be guaranteed to be non-null,
+ // because it is set in UpdateBlockAvailability. Some nullptr checks
+ // are still present, however, as belt-and-suspenders.
+
+ if (received_new_header && pindexLast->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
+ nodestate->m_last_block_announcement = GetTime();
+ }
+
+ // If we're in IBD, we want outbound peers that will serve us a useful
+ // chain. Disconnect peers that are on chains with insufficient work.
+ if (m_chainman.ActiveChainstate().IsInitialBlockDownload() && !may_have_more_headers) {
+ // If the peer has no more headers to give us, then we know we have
+ // their tip.
+ if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
+ // This peer has too little work on their headers chain to help
+ // us sync -- disconnect if it is an outbound disconnection
+ // candidate.
+ // Note: We compare their tip to nMinimumChainWork (rather than
+ // m_chainman.ActiveChain().Tip()) because we won't start block download
+ // until we have a headers chain that has at least
+ // nMinimumChainWork, even if a peer has a chain past our tip,
+ // as an anti-DoS measure.
+ if (pfrom.IsOutboundOrBlockRelayConn()) {
+ LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
+ pfrom.fDisconnect = true;
+ }
+ }
+ }
+
+ // If this is an outbound full-relay peer, check to see if we should protect
+ // it from the bad/lagging chain logic.
+ // Note that outbound block-relay peers are excluded from this protection, and
+ // thus always subject to eviction under the bad/lagging chain logic.
+ // See ChainSyncTimeoutState.
+ if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
+ if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
+ LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
+ nodestate->m_chain_sync.m_protect = true;
+ ++m_outbound_peers_with_protect_from_disconnect;
+ }
+ }
+}
+
void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
const std::vector<CBlockHeader>& headers,
bool via_compact_block)
@@ -2206,55 +2424,33 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
return;
}
- bool received_new_header = false;
const CBlockIndex *pindexLast = nullptr;
- {
- LOCK(cs_main);
- CNodeState *nodestate = State(pfrom.GetId());
- // If this looks like it could be a block announcement (nCount <=
- // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
- // don't connect:
- // - Send a getheaders message in response to try to connect the chain.
- // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
- // don't connect before giving DoS points
- // - Once a headers message is received that is valid and does connect,
- // nUnconnectingHeaders gets reset back to 0.
- if (!m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) && nCount <= MAX_BLOCKS_TO_ANNOUNCE) {
- nodestate->nUnconnectingHeaders++;
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), uint256()));
- LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
- headers[0].GetHash().ToString(),
- headers[0].hashPrevBlock.ToString(),
- m_chainman.m_best_header->nHeight,
- pfrom.GetId(), nodestate->nUnconnectingHeaders);
- // Set hashLastUnknownBlock for this peer, so that if we
- // eventually get the headers - even from a different peer -
- // we can use this peer to download.
- UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
-
- if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
- Misbehaving(peer, 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders));
- }
- return;
- }
+ // Do these headers connect to something in our block index?
+ bool headers_connect_blockindex{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) != nullptr)};
- uint256 hashLastBlock;
- for (const CBlockHeader& header : headers) {
- if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
- Misbehaving(peer, 20, "non-continuous headers sequence");
- return;
- }
- hashLastBlock = header.GetHash();
+ if (!headers_connect_blockindex) {
+ if (nCount <= MAX_BLOCKS_TO_ANNOUNCE) {
+ // If this looks like it could be a BIP 130 block announcement, use
+ // special logic for handling headers that don't connect, as this
+ // could be benign.
+ HandleFewUnconnectingHeaders(pfrom, peer, headers);
+ } else {
+ Misbehaving(peer, 10, "invalid header received");
}
+ return;
+ }
- // If we don't have the last header, then they'll have given us
- // something new (if these headers are valid).
- if (!m_chainman.m_blockman.LookupBlockIndex(hashLastBlock)) {
- received_new_header = true;
- }
+ // At this point, the headers connect to something in our block index.
+ if (!CheckHeadersAreContinuous(headers)) {
+ Misbehaving(peer, 20, "non-continuous headers sequence");
+ return;
}
+ // If we don't have the last header, then this peer will have given us
+ // something new (if these headers are valid).
+ bool received_new_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash()) == nullptr)};
+
BlockValidationState state;
if (!m_chainman.ProcessNewBlockHeaders(headers, state, &pindexLast)) {
if (state.IsInvalid()) {
@@ -2263,123 +2459,20 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
}
}
- {
- LOCK(cs_main);
- CNodeState *nodestate = State(pfrom.GetId());
- if (nodestate->nUnconnectingHeaders > 0) {
- LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders);
- }
- nodestate->nUnconnectingHeaders = 0;
-
- assert(pindexLast);
- UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
-
- // From here, pindexBestKnownBlock should be guaranteed to be non-null,
- // because it is set in UpdateBlockAvailability. Some nullptr checks
- // are still present, however, as belt-and-suspenders.
-
- if (received_new_header && pindexLast->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
- nodestate->m_last_block_announcement = GetTime();
- }
-
- if (nCount == MAX_HEADERS_RESULTS) {
- // Headers message had its maximum size; the peer may have more headers.
- // TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or m_chainman.m_best_header, continue
- // from there instead.
+ // Consider fetching more headers.
+ if (nCount == MAX_HEADERS_RESULTS) {
+ // Headers message had its maximum size; the peer may have more headers.
+ if (MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(pindexLast), peer)) {
LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
- pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexLast), uint256()));
- }
-
- // If this set of headers is valid and ends in a block with at least as
- // much work as our tip, download as much as possible.
- if (CanDirectFetch() && pindexLast->IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) {
- std::vector<const CBlockIndex*> vToFetch;
- const CBlockIndex *pindexWalk = pindexLast;
- // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
- while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
- !IsBlockRequested(pindexWalk->GetBlockHash()) &&
- (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || State(pfrom.GetId())->fHaveWitness)) {
- // We don't have this block, and it's not yet in flight.
- vToFetch.push_back(pindexWalk);
- }
- pindexWalk = pindexWalk->pprev;
- }
- // If pindexWalk still isn't on our main chain, we're looking at a
- // very large reorg at a time we think we're close to caught up to
- // the main chain -- this shouldn't really happen. Bail out on the
- // direct fetch and rely on parallel download instead.
- if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
- LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
- pindexLast->GetBlockHash().ToString(),
- pindexLast->nHeight);
- } else {
- std::vector<CInv> vGetData;
- // Download as much as possible, from earliest to latest.
- for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
- if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- // Can't download any more from this peer
- break;
- }
- uint32_t nFetchFlags = GetFetchFlags(pfrom);
- vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
- BlockRequested(pfrom.GetId(), *pindex);
- LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
- pindex->GetBlockHash().ToString(), pfrom.GetId());
- }
- if (vGetData.size() > 1) {
- LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
- pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
- }
- if (vGetData.size() > 0) {
- if (!m_ignore_incoming_txs &&
- nodestate->m_provides_cmpctblocks &&
- vGetData.size() == 1 &&
- mapBlocksInFlight.size() == 1 &&
- pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
- // In any case, we want to download using a compact block, not a regular one
- vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
- }
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
- }
- }
- }
- // If we're in IBD, we want outbound peers that will serve us a useful
- // chain. Disconnect peers that are on chains with insufficient work.
- if (m_chainman.ActiveChainstate().IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
- // When nCount < MAX_HEADERS_RESULTS, we know we have no more
- // headers to fetch from this peer.
- if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
- // This peer has too little work on their headers chain to help
- // us sync -- disconnect if it is an outbound disconnection
- // candidate.
- // Note: We compare their tip to nMinimumChainWork (rather than
- // m_chainman.ActiveChain().Tip()) because we won't start block download
- // until we have a headers chain that has at least
- // nMinimumChainWork, even if a peer has a chain past our tip,
- // as an anti-DoS measure.
- if (pfrom.IsOutboundOrBlockRelayConn()) {
- LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
- pfrom.fDisconnect = true;
- }
- }
- }
-
- // If this is an outbound full-relay peer, check to see if we should protect
- // it from the bad/lagging chain logic.
- // Note that outbound block-relay peers are excluded from this protection, and
- // thus always subject to eviction under the bad/lagging chain logic.
- // See ChainSyncTimeoutState.
- if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
- if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
- LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
- nodestate->m_chain_sync.m_protect = true;
- ++m_outbound_peers_with_protect_from_disconnect;
- }
+ pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
}
}
+ UpdatePeerStateForReceivedHeaders(pfrom, pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
+
+ // Consider immediately downloading blocks.
+ HeadersDirectFetchBlocks(pfrom, pindexLast);
+
return;
}
@@ -3142,8 +3235,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
if (best_block != nullptr) {
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), *best_block));
- LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", m_chainman.m_best_header->nHeight, best_block->ToString(), pfrom.GetId());
+ if (MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), *peer)) {
+ LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
+ m_chainman.m_best_header->nHeight, best_block->ToString(),
+ pfrom.GetId());
+ }
}
return;
@@ -3316,7 +3412,10 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// others.
if (m_chainman.ActiveTip() == nullptr ||
(m_chainman.ActiveTip()->nChainWork < nMinimumChainWork && !pfrom.HasPermission(NetPermissionFlags::Download))) {
- LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work\n", pfrom.GetId());
+ LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId());
+ // Just respond with an empty headers message, to tell the peer to
+ // go away but not treat us as unresponsive.
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, std::vector<CBlock>()));
return;
}
@@ -3597,8 +3696,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (!m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
// Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
- if (!m_chainman.ActiveChainstate().IsInitialBlockDownload())
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), uint256()));
+ if (!m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
+ MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), *peer);
+ }
return;
}
@@ -3872,6 +3972,10 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
+ // Assume that this is in response to any outstanding getheaders
+ // request we may have sent, and clear out the time of our last request
+ peer->m_last_getheaders_timestamp.store(NodeSeconds{});
+
std::vector<CBlockHeader> headers;
// Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
@@ -4300,7 +4404,7 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt
return fMoreWork;
}
-void PeerManagerImpl::ConsiderEviction(CNode& pto, std::chrono::seconds time_in_seconds)
+void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds)
{
AssertLockHeld(cs_main);
@@ -4338,10 +4442,15 @@ void PeerManagerImpl::ConsiderEviction(CNode& pto, std::chrono::seconds time_in_
pto.fDisconnect = true;
} else {
assert(state.m_chain_sync.m_work_header);
+ // Here, we assume that the getheaders message goes out,
+ // because it'll either go out or be skipped because of a
+ // getheaders in-flight already, in which case the peer should
+ // still respond to us with a sufficiently high work chain tip.
+ MaybeSendGetHeaders(pto,
+ m_chainman.ActiveChain().GetLocator(state.m_chain_sync.m_work_header->pprev),
+ peer);
LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
- m_connman.PushMessage(&pto, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
state.m_chain_sync.m_sent_getheaders = true;
- constexpr auto HEADERS_RESPONSE_TIME{2min};
// Bump the timeout to allow a response, which could clear the timeout
// (if the response shows the peer has synced), reset the timeout (if
// the peer syncs to the required work but not to our tip), or result
@@ -4749,15 +4858,6 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
- state.fSyncStarted = true;
- state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
- (
- // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
- // to maintain precision
- std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
- (GetAdjustedTime() - m_chainman.m_best_header->GetBlockTime()) / consensusParams.nPowTargetSpacing
- );
- nSyncStarted++;
const CBlockIndex* pindexStart = m_chainman.m_best_header;
/* If possible, start at the block preceding the currently
best known header. This ensures that we always get a
@@ -4768,8 +4868,19 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
got back an empty response. */
if (pindexStart->pprev)
pindexStart = pindexStart->pprev;
- LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
- m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexStart), uint256()));
+ if (MaybeSendGetHeaders(*pto, m_chainman.ActiveChain().GetLocator(pindexStart), *peer)) {
+ LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
+
+ state.fSyncStarted = true;
+ state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
+ (
+ // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
+ // to maintain precision
+ std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
+ (GetAdjustedTime() - m_chainman.m_best_header->GetBlockTime()) / consensusParams.nPowTargetSpacing
+ );
+ nSyncStarted++;
+ }
}
}
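
The headers-download timeout set in the hunk above grows with how far the peer's best known header lags the current time. A standalone sketch of the arithmetic follows (illustration only, not part of the diff); the constant values and the 30-day lag are assumptions, only the formula is taken from the code above.

    // Sketch of the m_headers_sync_timeout computation (assumed constants).
    #include <chrono>
    #include <cstdio>

    int main()
    {
        using namespace std::chrono;
        const auto timeout_base = 15min;         // assumed HEADERS_DOWNLOAD_TIMEOUT_BASE
        const auto timeout_per_header = 1ms;     // assumed HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
        const auto pow_target_spacing = 10min;   // mainnet consensusParams.nPowTargetSpacing
        const auto best_header_age = hours{24 * 30}; // peer's best header is ~30 days old

        // Convert the per-header allowance to microseconds before scaling, to keep
        // precision, then multiply by the estimated number of missing headers.
        const auto extra = duration_cast<microseconds>(timeout_per_header) *
                           (duration_cast<seconds>(best_header_age) / duration_cast<seconds>(pow_target_spacing));
        std::printf("headers sync timeout = now + %lld min + %lld ms\n",
                    static_cast<long long>(duration_cast<minutes>(timeout_base).count()),
                    static_cast<long long>(duration_cast<milliseconds>(extra).count()));
        return 0;
    }

With these numbers a peer roughly a month behind gets about 4.3 extra seconds on top of the base timeout.
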
@@ -5115,7 +5226,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Check that outbound peers have reasonable chains
// GetTime() is used by this anti-DoS logic so we can test this using mocktime
- ConsiderEviction(*pto, GetTime<std::chrono::seconds>());
+ ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
//
// Message: getdata (blocks)
diff --git a/src/node/connection_types.cpp b/src/node/connection_types.cpp
new file mode 100644
index 0000000000..904f4371aa
--- /dev/null
+++ b/src/node/connection_types.cpp
@@ -0,0 +1,26 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <node/connection_types.h>
+#include <cassert>
+
+std::string ConnectionTypeAsString(ConnectionType conn_type)
+{
+ switch (conn_type) {
+ case ConnectionType::INBOUND:
+ return "inbound";
+ case ConnectionType::MANUAL:
+ return "manual";
+ case ConnectionType::FEELER:
+ return "feeler";
+ case ConnectionType::OUTBOUND_FULL_RELAY:
+ return "outbound-full-relay";
+ case ConnectionType::BLOCK_RELAY:
+ return "block-relay-only";
+ case ConnectionType::ADDR_FETCH:
+ return "addr-fetch";
+ } // no default case, so the compiler can warn about missing cases
+
+ assert(false);
+}
diff --git a/src/node/connection_types.h b/src/node/connection_types.h
new file mode 100644
index 0000000000..5e1abcace6
--- /dev/null
+++ b/src/node/connection_types.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_NODE_CONNECTION_TYPES_H
+#define BITCOIN_NODE_CONNECTION_TYPES_H
+
+#include <string>
+
+/** Different types of connections to a peer. This enum encapsulates the
+ * information we have available at the time of opening or accepting the
+ * connection. Aside from INBOUND, all types are initiated by us.
+ *
+ * If adding or removing types, please update CONNECTION_TYPE_DOC in
+ * src/rpc/net.cpp and src/qt/rpcconsole.cpp, as well as the descriptions in
+ * src/qt/guiutil.cpp and src/bitcoin-cli.cpp::NetinfoRequestHandler. */
+enum class ConnectionType {
+ /**
+ * Inbound connections are those initiated by a peer. This is the only
+ * property we know at the time of connection, until P2P messages are
+ * exchanged.
+ */
+ INBOUND,
+
+ /**
+ * These are the default connections that we use to connect with the
+ * network. There is no restriction on what is relayed; by default we relay
+ * blocks, addresses & transactions. We automatically attempt to open
+ * MAX_OUTBOUND_FULL_RELAY_CONNECTIONS using addresses from our AddrMan.
+ */
+ OUTBOUND_FULL_RELAY,
+
+
+ /**
+ * We open manual connections to addresses that users explicitly requested
+ * via the addnode RPC or the -addnode/-connect configuration options. Even if a
+ * manual connection is misbehaving, we do not automatically disconnect or
+ * add it to our discouragement filter.
+ */
+ MANUAL,
+
+ /**
+ * Feeler connections are short-lived connections made to check that a node
+ * is alive. They can be useful for:
+ * - test-before-evict: if one of the peers is considered for eviction from
+ * our AddrMan because another peer is mapped to the same slot in the tried table,
+ * evict only if this longer-known peer is offline.
+ * - move node addresses from New to Tried table, so that we have more
+ * connectable addresses in our AddrMan.
+ * Note that in the literature ("Eclipse Attacks on Bitcoin’s Peer-to-Peer Network")
+ * only the latter feature is referred to as "feeler connections",
+ * although in our codebase feeler connections encompass test-before-evict as well.
+ * We make these connections approximately every FEELER_INTERVAL:
+ * first we resolve previously found collisions if they exist (test-before-evict),
+ * otherwise we connect to a node from the new table.
+ */
+ FEELER,
+
+ /**
+ * We use block-relay-only connections to help protect against partition
+ * attacks. By not relaying transactions or addresses, these connections
+ * are harder to detect by a third party, thus helping obfuscate the
+ * network topology. We automatically attempt to open
+ * MAX_BLOCK_RELAY_ONLY_ANCHORS using addresses from our anchors.dat; if
+ * MAX_BLOCK_RELAY_ONLY_CONNECTIONS isn't reached yet, we then connect to
+ * addresses from our AddrMan.
+ */
+ BLOCK_RELAY,
+
+ /**
+ * AddrFetch connections are short lived connections used to solicit
+ * addresses from peers. These are initiated to addresses submitted via the
+ * -seednode command line argument, or under certain conditions when the
+ * AddrMan is empty.
+ */
+ ADDR_FETCH,
+};
+
+/** Convert ConnectionType enum to a string value */
+std::string ConnectionTypeAsString(ConnectionType conn_type);
+
+#endif // BITCOIN_NODE_CONNECTION_TYPES_H
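
As a usage note, a minimal sketch of calling the new helper (not part of the diff; the build wiring is assumed):

    #include <node/connection_types.h>
    #include <iostream>

    int main()
    {
        const ConnectionType conn_type{ConnectionType::BLOCK_RELAY};
        // Prints "block-relay-only". Because the switch in ConnectionTypeAsString()
        // has no default case, adding a new enumerator without handling it there
        // produces a compiler warning instead of a silent fall-through.
        std::cout << ConnectionTypeAsString(conn_type) << '\n';
        return 0;
    }
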
diff --git a/src/node/eviction.cpp b/src/node/eviction.cpp
new file mode 100644
index 0000000000..33406931d4
--- /dev/null
+++ b/src/node/eviction.cpp
@@ -0,0 +1,240 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <node/eviction.h>
+
+#include <algorithm>
+#include <array>
+#include <chrono>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <vector>
+
+
+static bool ReverseCompareNodeMinPingTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ return a.m_min_ping_time > b.m_min_ping_time;
+}
+
+static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ return a.m_connected > b.m_connected;
+}
+
+static bool CompareNetGroupKeyed(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) {
+ return a.nKeyedNetGroup < b.nKeyedNetGroup;
+}
+
+static bool CompareNodeBlockTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ // There is a fall-through here because it is common for a node to have many peers which have not yet relayed a block.
+ if (a.m_last_block_time != b.m_last_block_time) return a.m_last_block_time < b.m_last_block_time;
+ if (a.fRelevantServices != b.fRelevantServices) return b.fRelevantServices;
+ return a.m_connected > b.m_connected;
+}
+
+static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ // There is a fall-through here because it is common for a node to have more than a few peers that have not yet relayed txn.
+ if (a.m_last_tx_time != b.m_last_tx_time) return a.m_last_tx_time < b.m_last_tx_time;
+ if (a.m_relay_txs != b.m_relay_txs) return b.m_relay_txs;
+ if (a.fBloomFilter != b.fBloomFilter) return a.fBloomFilter;
+ return a.m_connected > b.m_connected;
+}
+
+// Pick out the potential block-relay only peers, and sort them by last block time.
+static bool CompareNodeBlockRelayOnlyTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
+{
+ if (a.m_relay_txs != b.m_relay_txs) return a.m_relay_txs;
+ if (a.m_last_block_time != b.m_last_block_time) return a.m_last_block_time < b.m_last_block_time;
+ if (a.fRelevantServices != b.fRelevantServices) return b.fRelevantServices;
+ return a.m_connected > b.m_connected;
+}
+
+/**
+ * Sort eviction candidates by network/localhost and connection uptime.
+ * Candidates near the beginning are more likely to be evicted, and those
+ * near the end are more likely to be protected, e.g. less likely to be evicted.
+ * - First, nodes that are not `is_local` and that do not belong to `network`,
+ * sorted by increasing uptime (from most recently connected to connected longer).
+ *   sorted by increasing uptime (from most recently connected to longest connected).
+ */
+struct CompareNodeNetworkTime {
+ const bool m_is_local;
+ const Network m_network;
+ CompareNodeNetworkTime(bool is_local, Network network) : m_is_local(is_local), m_network(network) {}
+ bool operator()(const NodeEvictionCandidate& a, const NodeEvictionCandidate& b) const
+ {
+ if (m_is_local && a.m_is_local != b.m_is_local) return b.m_is_local;
+ if ((a.m_network == m_network) != (b.m_network == m_network)) return b.m_network == m_network;
+ return a.m_connected > b.m_connected;
+ };
+};
+
+//! Sort a vector by the specified comparator, then erase the last K elements for which the predicate holds.
+template <typename T, typename Comparator>
+static void EraseLastKElements(
+ std::vector<T>& elements, Comparator comparator, size_t k,
+ std::function<bool(const NodeEvictionCandidate&)> predicate = [](const NodeEvictionCandidate& n) { return true; })
+{
+ std::sort(elements.begin(), elements.end(), comparator);
+ size_t eraseSize = std::min(k, elements.size());
+ elements.erase(std::remove_if(elements.end() - eraseSize, elements.end(), predicate), elements.end());
+}
+
+void ProtectNoBanConnections(std::vector<NodeEvictionCandidate>& eviction_candidates)
+{
+ eviction_candidates.erase(std::remove_if(eviction_candidates.begin(), eviction_candidates.end(),
+ [](NodeEvictionCandidate const& n) {
+ return n.m_noban;
+ }),
+ eviction_candidates.end());
+}
+
+void ProtectOutboundConnections(std::vector<NodeEvictionCandidate>& eviction_candidates)
+{
+ eviction_candidates.erase(std::remove_if(eviction_candidates.begin(), eviction_candidates.end(),
+ [](NodeEvictionCandidate const& n) {
+ return n.m_conn_type != ConnectionType::INBOUND;
+ }),
+ eviction_candidates.end());
+}
+
+void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& eviction_candidates)
+{
+ // Protect the half of the remaining nodes which have been connected the longest.
+ // This replicates the non-eviction implicit behavior, and precludes attacks that start later.
+ // To favor the diversity of our peer connections, reserve up to half of these protected
+ // spots for Tor/onion, localhost, I2P, and CJDNS peers, even if they don't have the longest
+ // uptime overall. This helps protect these higher-latency peers, which tend to be otherwise
+ // disadvantaged under our eviction criteria.
+ const size_t initial_size = eviction_candidates.size();
+ const size_t total_protect_size{initial_size / 2};
+
+ // Disadvantaged networks to protect. In the case of equal counts, earlier array members
+ // have the first opportunity to recover unused slots from the previous iteration.
+ struct Net { bool is_local; Network id; size_t count; };
+ std::array<Net, 4> networks{
+ {{false, NET_CJDNS, 0}, {false, NET_I2P, 0}, {/*localhost=*/true, NET_MAX, 0}, {false, NET_ONION, 0}}};
+
+ // Count and store the number of eviction candidates per network.
+ for (Net& n : networks) {
+ n.count = std::count_if(eviction_candidates.cbegin(), eviction_candidates.cend(),
+ [&n](const NodeEvictionCandidate& c) {
+ return n.is_local ? c.m_is_local : c.m_network == n.id;
+ });
+ }
+ // Sort `networks` by ascending candidate count, to give networks having fewer candidates
+ // the first opportunity to recover unused protected slots from the previous iteration.
+ std::stable_sort(networks.begin(), networks.end(), [](Net a, Net b) { return a.count < b.count; });
+
+ // Protect up to 25% of the eviction candidates by disadvantaged network.
+ const size_t max_protect_by_network{total_protect_size / 2};
+ size_t num_protected{0};
+
+ while (num_protected < max_protect_by_network) {
+ // Count the number of disadvantaged networks from which we have peers to protect.
+ auto num_networks = std::count_if(networks.begin(), networks.end(), [](const Net& n) { return n.count; });
+ if (num_networks == 0) {
+ break;
+ }
+ const size_t disadvantaged_to_protect{max_protect_by_network - num_protected};
+ const size_t protect_per_network{std::max(disadvantaged_to_protect / num_networks, static_cast<size_t>(1))};
+ // Flag for exiting early if no candidates from a disadvantaged network remain.
+ bool protected_at_least_one{false};
+
+ for (Net& n : networks) {
+ if (n.count == 0) continue;
+ const size_t before = eviction_candidates.size();
+ EraseLastKElements(eviction_candidates, CompareNodeNetworkTime(n.is_local, n.id),
+ protect_per_network, [&n](const NodeEvictionCandidate& c) {
+ return n.is_local ? c.m_is_local : c.m_network == n.id;
+ });
+ const size_t after = eviction_candidates.size();
+ if (before > after) {
+ protected_at_least_one = true;
+ const size_t delta{before - after};
+ num_protected += delta;
+ if (num_protected >= max_protect_by_network) {
+ break;
+ }
+ n.count -= delta;
+ }
+ }
+ if (!protected_at_least_one) {
+ break;
+ }
+ }
+
+ // Calculate how many we removed, and update our total number of peers that
+ // we want to protect based on uptime accordingly.
+ assert(num_protected == initial_size - eviction_candidates.size());
+ const size_t remaining_to_protect{total_protect_size - num_protected};
+ EraseLastKElements(eviction_candidates, ReverseCompareNodeTimeConnected, remaining_to_protect);
+}
+
+[[nodiscard]] std::optional<NodeId> SelectNodeToEvict(std::vector<NodeEvictionCandidate>&& vEvictionCandidates)
+{
+ // Protect connections with certain characteristics
+
+ ProtectNoBanConnections(vEvictionCandidates);
+
+ ProtectOutboundConnections(vEvictionCandidates);
+
+ // Deterministically select 4 peers to protect by netgroup.
+ // An attacker cannot predict which netgroups will be protected
+ EraseLastKElements(vEvictionCandidates, CompareNetGroupKeyed, 4);
+ // Protect the 8 nodes with the lowest minimum ping time.
+ // An attacker cannot manipulate this metric without physically moving nodes closer to the target.
+ EraseLastKElements(vEvictionCandidates, ReverseCompareNodeMinPingTime, 8);
+ // Protect 4 nodes that most recently sent us novel transactions accepted into our mempool.
+ // An attacker cannot manipulate this metric without performing useful work.
+ EraseLastKElements(vEvictionCandidates, CompareNodeTXTime, 4);
+ // Protect up to 8 non-tx-relay peers that have sent us novel blocks.
+ EraseLastKElements(vEvictionCandidates, CompareNodeBlockRelayOnlyTime, 8,
+ [](const NodeEvictionCandidate& n) { return !n.m_relay_txs && n.fRelevantServices; });
+
+ // Protect 4 nodes that most recently sent us novel blocks.
+ // An attacker cannot manipulate this metric without performing useful work.
+ EraseLastKElements(vEvictionCandidates, CompareNodeBlockTime, 4);
+
+ // Protect some of the remaining eviction candidates by ratios of desirable
+ // or disadvantaged characteristics.
+ ProtectEvictionCandidatesByRatio(vEvictionCandidates);
+
+ if (vEvictionCandidates.empty()) return std::nullopt;
+
+ // If any remaining peers are preferred for eviction, consider only them.
+ // This happens after the other preferences since if a peer is really the best by other criteria (especially relaying blocks)
+ // then we probably don't want to evict it no matter what.
+ if (std::any_of(vEvictionCandidates.begin(),vEvictionCandidates.end(),[](NodeEvictionCandidate const &n){return n.prefer_evict;})) {
+ vEvictionCandidates.erase(std::remove_if(vEvictionCandidates.begin(),vEvictionCandidates.end(),
+ [](NodeEvictionCandidate const &n){return !n.prefer_evict;}),vEvictionCandidates.end());
+ }
+
+ // Identify the network group with the most connections and youngest member.
+ // (vEvictionCandidates is already sorted by reverse connect time)
+ uint64_t naMostConnections;
+ unsigned int nMostConnections = 0;
+ std::chrono::seconds nMostConnectionsTime{0};
+ std::map<uint64_t, std::vector<NodeEvictionCandidate> > mapNetGroupNodes;
+ for (const NodeEvictionCandidate &node : vEvictionCandidates) {
+ std::vector<NodeEvictionCandidate> &group = mapNetGroupNodes[node.nKeyedNetGroup];
+ group.push_back(node);
+ const auto grouptime{group[0].m_connected};
+
+ if (group.size() > nMostConnections || (group.size() == nMostConnections && grouptime > nMostConnectionsTime)) {
+ nMostConnections = group.size();
+ nMostConnectionsTime = grouptime;
+ naMostConnections = node.nKeyedNetGroup;
+ }
+ }
+
+ // Reduce to the network group with the most connections
+ vEvictionCandidates = std::move(mapNetGroupNodes[naMostConnections]);
+
+ // Disconnect from the network group with the most connections
+ return vEvictionCandidates.front().id;
+}
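
For orientation, a standalone sketch of driving the selection logic above (illustration only, not part of the diff): the field values are invented, and in the node they are filled in from CNode state before calling SelectNodeToEvict().

    #include <node/eviction.h>
    #include <netaddress.h>

    #include <chrono>
    #include <cstdio>
    #include <utility>
    #include <vector>

    int main()
    {
        std::vector<NodeEvictionCandidate> candidates;
        for (NodeId id = 0; id < 64; ++id) {
            candidates.push_back(NodeEvictionCandidate{
                /*id=*/id,
                /*m_connected=*/std::chrono::seconds{100 * id},
                /*m_min_ping_time=*/std::chrono::microseconds{1000 * id},
                /*m_last_block_time=*/std::chrono::seconds{0},
                /*m_last_tx_time=*/std::chrono::seconds{0},
                /*fRelevantServices=*/true,
                /*m_relay_txs=*/true,
                /*fBloomFilter=*/false,
                /*nKeyedNetGroup=*/static_cast<uint64_t>(id % 2), // two netgroups
                /*prefer_evict=*/false,
                /*m_is_local=*/false,
                /*m_network=*/NET_IPV4,
                /*m_noban=*/false,
                /*m_conn_type=*/ConnectionType::INBOUND,
            });
        }
        // After the fixed protections and the ratio-based protection, the youngest
        // member of the largest remaining netgroup is returned, if anyone is left.
        if (const auto node_id = SelectNodeToEvict(std::move(candidates))) {
            std::printf("would evict peer=%lld\n", static_cast<long long>(*node_id));
        } else {
            std::printf("all candidates were protected\n");
        }
        return 0;
    }
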
diff --git a/src/node/eviction.h b/src/node/eviction.h
new file mode 100644
index 0000000000..1bb32e5327
--- /dev/null
+++ b/src/node/eviction.h
@@ -0,0 +1,69 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_NODE_EVICTION_H
+#define BITCOIN_NODE_EVICTION_H
+
+#include <node/connection_types.h>
+#include <net_permissions.h>
+
+#include <chrono>
+#include <cstdint>
+#include <optional>
+#include <vector>
+
+typedef int64_t NodeId;
+
+struct NodeEvictionCandidate {
+ NodeId id;
+ std::chrono::seconds m_connected;
+ std::chrono::microseconds m_min_ping_time;
+ std::chrono::seconds m_last_block_time;
+ std::chrono::seconds m_last_tx_time;
+ bool fRelevantServices;
+ bool m_relay_txs;
+ bool fBloomFilter;
+ uint64_t nKeyedNetGroup;
+ bool prefer_evict;
+ bool m_is_local;
+ Network m_network;
+ bool m_noban;
+ ConnectionType m_conn_type;
+};
+
+/**
+ * Select an inbound peer to evict after filtering out (protecting) peers having
+ * distinct, difficult-to-forge characteristics. The protection logic picks out
+ * fixed numbers of desirable peers per various criteria, followed by (mostly)
+ * ratios of desirable or disadvantaged peers. If any eviction candidates
+ * remain, the selection logic chooses a peer to evict.
+ */
+[[nodiscard]] std::optional<NodeId> SelectNodeToEvict(std::vector<NodeEvictionCandidate>&& vEvictionCandidates);
+
+/** Protect desirable or disadvantaged inbound peers from eviction by ratio.
+ *
+ * This function protects half of the peers which have been connected the
+ * longest, to replicate the non-eviction implicit behavior and preclude attacks
+ * that start later.
+ *
+ * Half of these protected spots (1/4 of the total) are reserved for the
+ * following categories of peers, sorted by longest uptime, even if they're not
+ * longest uptime overall:
+ *
+ * - onion peers connected via our tor control service
+ *
+ * - localhost peers, as manually configured hidden services not using
+ * `-bind=addr[:port]=onion` will not be detected as inbound onion connections
+ *
+ * - I2P peers
+ *
+ * - CJDNS peers
+ *
+ * This helps protect these privacy-network peers, which tend to be otherwise
+ * disadvantaged under our eviction criteria due to their higher min ping times
+ * relative to IPv4/IPv6 peers, and favors the diversity of peer connections.
+ */
+void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& vEvictionCandidates);
+
+#endif // BITCOIN_NODE_EVICTION_H
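
To make the ratios described above concrete, a small worked example (the candidate count is made up):

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        const std::size_t num_candidates = 128;                // assumed inbound eviction candidates
        const std::size_t total_protect = num_candidates / 2;  // 64 protected in total
        const std::size_t by_network_max = total_protect / 2;  // up to 32 (a quarter overall) reserved
                                                               // for onion/localhost/I2P/CJDNS peers
        std::printf("protect %zu peers total, at most %zu of them by disadvantaged network\n",
                    total_protect, by_network_max);
        return 0;
    }
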
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index af9458206e..6846e992d4 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -845,7 +845,7 @@ static RPCHelpMan gettxoutsetinfo()
"Note this call may take some time if you are not using coinstatsindex.\n",
{
{"hash_type", RPCArg::Type::STR, RPCArg::Default{"hash_serialized_2"}, "Which UTXO set hash should be calculated. Options: 'hash_serialized_2' (the legacy algorithm), 'muhash', 'none'."},
- {"hash_or_height", RPCArg::Type::NUM, RPCArg::Optional::OMITTED_NAMED_ARG, "The block hash or height of the target height (only available with coinstatsindex).", "", {"", "string or numeric"}},
+ {"hash_or_height", RPCArg::Type::NUM, RPCArg::DefaultHint{"the current best block"}, "The block hash or height of the target height (only available with coinstatsindex).", "", {"", "string or numeric"}},
{"use_index", RPCArg::Type::BOOL, RPCArg::Default{true}, "Use coinstatsindex, if available."},
},
RPCResult{
@@ -881,6 +881,7 @@ static RPCHelpMan gettxoutsetinfo()
HelpExampleCli("gettxoutsetinfo", R"("none")") +
HelpExampleCli("gettxoutsetinfo", R"("none" 1000)") +
HelpExampleCli("gettxoutsetinfo", R"("none" '"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09"')") +
+ HelpExampleCli("-named gettxoutsetinfo", R"(hash_type='muhash' use_index='false')") +
HelpExampleRpc("gettxoutsetinfo", "") +
HelpExampleRpc("gettxoutsetinfo", R"("none")") +
HelpExampleRpc("gettxoutsetinfo", R"("none", 1000)") +
@@ -917,6 +918,9 @@ static RPCHelpMan gettxoutsetinfo()
throw JSONRPCError(RPC_INVALID_PARAMETER, "hash_serialized_2 hash type cannot be queried for a specific block");
}
+ if (!index_requested) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot set use_index to false when querying for a specific block");
+ }
pindex = ParseHashOrHeight(request.params[1], chainman);
}
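
In practice the new check means a call that pins a specific block while disabling the index now fails up front. Assuming coinstatsindex is running and the usual client-side argument conversions, something like

    bitcoin-cli gettxoutsetinfo muhash 1000 false

is rejected with the "Cannot set use_index to false when querying for a specific block" error added above, while the -named form shown in the new example works as before.
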
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index ae0d0112ba..9be3ab7df0 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -110,6 +110,7 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "sendrawtransaction", 1, "maxfeerate" },
{ "testmempoolaccept", 0, "rawtxs" },
{ "testmempoolaccept", 1, "maxfeerate" },
+ { "submitpackage", 0, "package" },
{ "combinerawtransaction", 0, "txs" },
{ "fundrawtransaction", 1, "options" },
{ "fundrawtransaction", 2, "iswitness" },
diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp
index c8208feaab..84d43e7818 100644
--- a/src/rpc/mempool.cpp
+++ b/src/rpc/mempool.cpp
@@ -5,6 +5,7 @@
#include <rpc/blockchain.h>
+#include <chainparams.h>
#include <core_io.h>
#include <fs.h>
#include <policy/rbf.h>
@@ -732,6 +733,150 @@ static RPCHelpMan savemempool()
};
}
+static RPCHelpMan submitpackage()
+{
+ return RPCHelpMan{"submitpackage",
+ "Submit a package of raw transactions (serialized, hex-encoded) to local node (-regtest only).\n"
+ "The package will be validated according to consensus and mempool policy rules. If all transactions pass, they will be accepted to mempool.\n"
+ "This RPC is experimental and the interface may be unstable. Refer to doc/policy/packages.md for documentation on package policies.\n"
+ "Warning: until package relay is in use, successful submission does not mean the transaction will propagate to other nodes on the network.\n"
+ "Currently, each transaction is broadcasted individually after submission, which means they must meet other nodes' feerate requirements alone.\n"
+ ,
+ {
+ {"package", RPCArg::Type::ARR, RPCArg::Optional::NO, "An array of raw transactions.",
+ {
+ {"rawtx", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, ""},
+ },
+ },
+ },
+ RPCResult{
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ_DYN, "tx-results", "transaction results keyed by wtxid",
+ {
+ {RPCResult::Type::OBJ, "wtxid", "transaction wtxid", {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction hash in hex"},
+ {RPCResult::Type::STR_HEX, "other-wtxid", /*optional=*/true, "The wtxid of a different transaction with the same txid but different witness found in the mempool. This means the submitted transaction was ignored."},
+ {RPCResult::Type::NUM, "vsize", "Virtual transaction size as defined in BIP 141."},
+ {RPCResult::Type::OBJ, "fees", "Transaction fees", {
+ {RPCResult::Type::STR_AMOUNT, "base", "transaction fee in " + CURRENCY_UNIT},
+ }},
+ }}
+ }},
+ {RPCResult::Type::STR_AMOUNT, "package-feerate", /*optional=*/true, "package feerate used for feerate checks in " + CURRENCY_UNIT + " per KvB. Excludes transactions which were deduplicated or accepted individually."},
+ {RPCResult::Type::ARR, "replaced-transactions", /*optional=*/true, "List of txids of replaced transactions",
+ {
+ {RPCResult::Type::STR_HEX, "", "The transaction id"},
+ }},
+ },
+ },
+ RPCExamples{
+ HelpExampleCli("testmempoolaccept", "[rawtx1, rawtx2]") +
+ HelpExampleCli("submitpackage", "[rawtx1, rawtx2]")
+ },
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+ {
+ if (!Params().IsMockableChain()) {
+ throw std::runtime_error("submitpackage is for regression testing (-regtest mode) only");
+ }
+ RPCTypeCheck(request.params, {
+ UniValue::VARR,
+ });
+ const UniValue raw_transactions = request.params[0].get_array();
+ if (raw_transactions.size() < 1 || raw_transactions.size() > MAX_PACKAGE_COUNT) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER,
+ "Array must contain between 1 and " + ToString(MAX_PACKAGE_COUNT) + " transactions.");
+ }
+
+ std::vector<CTransactionRef> txns;
+ txns.reserve(raw_transactions.size());
+ for (const auto& rawtx : raw_transactions.getValues()) {
+ CMutableTransaction mtx;
+ if (!DecodeHexTx(mtx, rawtx.get_str())) {
+ throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
+ "TX decode failed: " + rawtx.get_str() + " Make sure the tx has at least one input.");
+ }
+ txns.emplace_back(MakeTransactionRef(std::move(mtx)));
+ }
+
+ NodeContext& node = EnsureAnyNodeContext(request.context);
+ CTxMemPool& mempool = EnsureMemPool(node);
+ CChainState& chainstate = EnsureChainman(node).ActiveChainstate();
+ const auto package_result = WITH_LOCK(::cs_main, return ProcessNewPackage(chainstate, mempool, txns, /*test_accept=*/ false));
+
+ // First catch any errors.
+ switch(package_result.m_state.GetResult()) {
+ case PackageValidationResult::PCKG_RESULT_UNSET: break;
+ case PackageValidationResult::PCKG_POLICY:
+ {
+ throw JSONRPCTransactionError(TransactionError::INVALID_PACKAGE,
+ package_result.m_state.GetRejectReason());
+ }
+ case PackageValidationResult::PCKG_MEMPOOL_ERROR:
+ {
+ throw JSONRPCTransactionError(TransactionError::MEMPOOL_ERROR,
+ package_result.m_state.GetRejectReason());
+ }
+ case PackageValidationResult::PCKG_TX:
+ {
+ for (const auto& tx : txns) {
+ auto it = package_result.m_tx_results.find(tx->GetWitnessHash());
+ if (it != package_result.m_tx_results.end() && it->second.m_state.IsInvalid()) {
+ throw JSONRPCTransactionError(TransactionError::MEMPOOL_REJECTED,
+ strprintf("%s failed: %s", tx->GetHash().ToString(), it->second.m_state.GetRejectReason()));
+ }
+ }
+ // If a PCKG_TX error was returned, there must have been an invalid transaction.
+ NONFATAL_UNREACHABLE();
+ }
+ }
+ for (const auto& tx : txns) {
+ size_t num_submitted{0};
+ std::string err_string;
+ const auto err = BroadcastTransaction(node, tx, err_string, 0, true, true);
+ if (err != TransactionError::OK) {
+ throw JSONRPCTransactionError(err,
+ strprintf("transaction broadcast failed: %s (all transactions were submitted, %d transactions were broadcast successfully)",
+ err_string, num_submitted));
+ }
+ }
+ UniValue rpc_result{UniValue::VOBJ};
+ UniValue tx_result_map{UniValue::VOBJ};
+ std::set<uint256> replaced_txids;
+ for (const auto& tx : txns) {
+ auto it = package_result.m_tx_results.find(tx->GetWitnessHash());
+ CHECK_NONFATAL(it != package_result.m_tx_results.end());
+ UniValue result_inner{UniValue::VOBJ};
+ result_inner.pushKV("txid", tx->GetHash().GetHex());
+ if (it->second.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS) {
+ result_inner.pushKV("other-wtxid", it->second.m_other_wtxid.value().GetHex());
+ }
+ if (it->second.m_result_type == MempoolAcceptResult::ResultType::VALID ||
+ it->second.m_result_type == MempoolAcceptResult::ResultType::MEMPOOL_ENTRY) {
+ result_inner.pushKV("vsize", int64_t{it->second.m_vsize.value()});
+ UniValue fees(UniValue::VOBJ);
+ fees.pushKV("base", ValueFromAmount(it->second.m_base_fees.value()));
+ result_inner.pushKV("fees", fees);
+ if (it->second.m_replaced_transactions.has_value()) {
+ for (const auto& ptx : it->second.m_replaced_transactions.value()) {
+ replaced_txids.insert(ptx->GetHash());
+ }
+ }
+ }
+ tx_result_map.pushKV(tx->GetWitnessHash().GetHex(), result_inner);
+ }
+ rpc_result.pushKV("tx-results", tx_result_map);
+ if (package_result.m_package_feerate.has_value()) {
+ rpc_result.pushKV("package-feerate", ValueFromAmount(package_result.m_package_feerate.value().GetFeePerK()));
+ }
+ UniValue replaced_list(UniValue::VARR);
+ for (const uint256& hash : replaced_txids) replaced_list.push_back(hash.ToString());
+ rpc_result.pushKV("replaced-transactions", replaced_list);
+ return rpc_result;
+ },
+ };
+}
+
void RegisterMempoolRPCCommands(CRPCTable& t)
{
static const CRPCCommand commands[]{
@@ -744,6 +889,7 @@ void RegisterMempoolRPCCommands(CRPCTable& t)
{"blockchain", &getmempoolinfo},
{"blockchain", &getrawmempool},
{"blockchain", &savemempool},
+ {"hidden", &submitpackage},
};
for (const auto& c : commands) {
t.appendCommand(c.name, &c);
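
A hedged usage sketch for the new RPC (regtest only; the hex strings are placeholders, not real transactions). With the conversion-table entry added to rpc/client.cpp above, the package can be passed to bitcoin-cli as a JSON array:

    bitcoin-cli -regtest submitpackage '["<parent_rawtx_hex>", "<child_rawtx_hex>"]'

On success the result carries a tx-results object keyed by wtxid, an optional package-feerate, and the replaced-transactions list, as documented in the RPCResult above.
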
diff --git a/src/test/fuzz/node_eviction.cpp b/src/test/fuzz/node_eviction.cpp
index 6a363f00f7..e27b254580 100644
--- a/src/test/fuzz/node_eviction.cpp
+++ b/src/test/fuzz/node_eviction.cpp
@@ -32,6 +32,8 @@ FUZZ_TARGET(node_eviction)
/*prefer_evict=*/fuzzed_data_provider.ConsumeBool(),
/*m_is_local=*/fuzzed_data_provider.ConsumeBool(),
/*m_network=*/fuzzed_data_provider.PickValueInArray(ALL_NETWORKS),
+ /*m_noban=*/fuzzed_data_provider.ConsumeBool(),
+ /*m_conn_type=*/fuzzed_data_provider.PickValueInArray(ALL_CONNECTION_TYPES),
});
}
// Make a copy since eviction_candidates may be in some valid but otherwise
diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp
index e4e83c3f32..26913a41d2 100644
--- a/src/test/fuzz/rpc.cpp
+++ b/src/test/fuzz/rpc.cpp
@@ -159,6 +159,7 @@ const std::vector<std::string> RPC_COMMANDS_SAFE_FOR_FUZZING{
"signrawtransactionwithkey",
"submitblock",
"submitheader",
+ "submitpackage",
"syncwithvalidationinterfacequeue",
"testmempoolaccept",
"uptime",
diff --git a/src/test/fuzz/txorphan.cpp b/src/test/fuzz/txorphan.cpp
new file mode 100644
index 0000000000..d318baa6a2
--- /dev/null
+++ b/src/test/fuzz/txorphan.cpp
@@ -0,0 +1,143 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <consensus/amount.h>
+#include <net.h>
+#include <net_processing.h>
+#include <primitives/transaction.h>
+#include <script/script.h>
+#include <sync.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+#include <test/util/setup_common.h>
+#include <txorphanage.h>
+#include <uint256.h>
+#include <util/check.h>
+#include <util/time.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+void initialize_orphanage()
+{
+ static const auto testing_setup = MakeNoLogFileContext();
+}
+
+FUZZ_TARGET_INIT(txorphan, initialize_orphanage)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ SetMockTime(ConsumeTime(fuzzed_data_provider));
+
+ TxOrphanage orphanage;
+ std::set<uint256> orphan_work_set;
+ std::vector<COutPoint> outpoints;
+ // initial outpoints used to construct transactions later
+ for (uint8_t i = 0; i < 4; i++) {
+ outpoints.emplace_back(uint256{i}, 0);
+ }
+ // if true, allow duplicate input when constructing tx
+ const bool duplicate_input = fuzzed_data_provider.ConsumeBool();
+
+ LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10 * DEFAULT_MAX_ORPHAN_TRANSACTIONS)
+ {
+ // construct transaction
+ const CTransactionRef tx = [&] {
+ CMutableTransaction tx_mut;
+ const auto num_in = fuzzed_data_provider.ConsumeIntegralInRange<uint32_t>(1, outpoints.size());
+ const auto num_out = fuzzed_data_provider.ConsumeIntegralInRange<uint32_t>(1, outpoints.size());
+ // pick inputs from the available outpoints (kept unique unless duplicate inputs are allowed)
+ for (uint32_t i = 0; i < num_in; i++) {
+ auto& prevout = PickValue(fuzzed_data_provider, outpoints);
+ tx_mut.vin.emplace_back(prevout);
+ // pop the picked outpoint if duplicate input is not allowed
+ if (!duplicate_input) {
+ std::swap(prevout, outpoints.back());
+ outpoints.pop_back();
+ }
+ }
+ // output amount will not affect txorphanage
+ for (uint32_t i = 0; i < num_out; i++) {
+ tx_mut.vout.emplace_back(CAmount{0}, CScript{});
+ }
+ // restore previously popped outpoints
+ for (auto& in : tx_mut.vin) {
+ outpoints.push_back(in.prevout);
+ }
+ const auto new_tx = MakeTransactionRef(tx_mut);
+ // add newly constructed transaction to outpoints
+ for (uint32_t i = 0; i < num_out; i++) {
+ outpoints.emplace_back(new_tx->GetHash(), i);
+ }
+ return new_tx;
+ }();
+
+ // trigger orphanage functions
+ LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10 * DEFAULT_MAX_ORPHAN_TRANSACTIONS)
+ {
+ NodeId peer_id = fuzzed_data_provider.ConsumeIntegral<NodeId>();
+
+ CallOneOf(
+ fuzzed_data_provider,
+ [&] {
+ LOCK(g_cs_orphans);
+ orphanage.AddChildrenToWorkSet(*tx, orphan_work_set);
+ },
+ [&] {
+ bool have_tx = orphanage.HaveTx(GenTxid::Txid(tx->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(tx->GetHash()));
+ {
+ LOCK(g_cs_orphans);
+ bool get_tx = orphanage.GetTx(tx->GetHash()).first != nullptr;
+ Assert(have_tx == get_tx);
+ }
+ },
+ [&] {
+ bool have_tx = orphanage.HaveTx(GenTxid::Txid(tx->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(tx->GetHash()));
+ // AddTx should return false if the tx is too big or we already have it
+ {
+ LOCK(g_cs_orphans);
+ Assert(have_tx != orphanage.AddTx(tx, peer_id));
+ }
+ have_tx = orphanage.HaveTx(GenTxid::Txid(tx->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(tx->GetHash()));
+ // the tx should already have been added, since it will not be too big in this test:
+ // have_tx should be true and AddTx should fail
+ {
+ LOCK(g_cs_orphans);
+ Assert(have_tx && !orphanage.AddTx(tx, peer_id));
+ }
+ },
+ [&] {
+ bool have_tx = orphanage.HaveTx(GenTxid::Txid(tx->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(tx->GetHash()));
+ // EraseTx should return 0 if m_orphans doesn't have the tx
+ {
+ LOCK(g_cs_orphans);
+ Assert(have_tx == orphanage.EraseTx(tx->GetHash()));
+ }
+ have_tx = orphanage.HaveTx(GenTxid::Txid(tx->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(tx->GetHash()));
+ // have_tx should be false and EraseTx should fail
+ {
+ LOCK(g_cs_orphans);
+ Assert(!have_tx && !orphanage.EraseTx(tx->GetHash()));
+ }
+ },
+ [&] {
+ LOCK(g_cs_orphans);
+ orphanage.EraseForPeer(peer_id);
+ },
+ [&] {
+ // test mocktime and expiry
+ SetMockTime(ConsumeTime(fuzzed_data_provider));
+ auto size_before = orphanage.Size();
+ auto limit = fuzzed_data_provider.ConsumeIntegral<unsigned int>();
+ auto n_evicted = WITH_LOCK(g_cs_orphans, return orphanage.LimitOrphans(limit));
+ Assert(size_before - n_evicted <= limit);
+ Assert(orphanage.Size() <= limit);
+ });
+ }
+ }
+}
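
For reference, a minimal sketch of the TxOrphanage calls this fuzz target exercises (not part of the diff; the helper name is hypothetical, tx is assumed to be a small, already-constructed transaction, and locking mirrors the target above):

    #include <net.h>
    #include <primitives/transaction.h>
    #include <sync.h>
    #include <txorphanage.h>
    #include <util/check.h>

    void OrphanageRoundTrip(const CTransactionRef& tx, NodeId peer_id)
    {
        TxOrphanage orphanage;
        {
            LOCK(g_cs_orphans);
            // First insertion succeeds as long as the tx is not oversized.
            Assert(orphanage.AddTx(tx, peer_id));
        }
        Assert(orphanage.HaveTx(GenTxid::Wtxid(tx->GetWitnessHash())));
        {
            LOCK(g_cs_orphans);
            // Erasing by txid reports how many orphans were removed (one here).
            Assert(orphanage.EraseTx(tx->GetHash()));
        }
        Assert(!orphanage.HaveTx(GenTxid::Txid(tx->GetHash())));
    }
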
diff --git a/src/test/util/net.cpp b/src/test/util/net.cpp
index 62b770753a..071193b550 100644
--- a/src/test/util/net.cpp
+++ b/src/test/util/net.cpp
@@ -5,6 +5,7 @@
#include <test/util/net.h>
#include <chainparams.h>
+#include <node/eviction.h>
#include <net.h>
#include <span.h>
@@ -58,6 +59,8 @@ std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(int n_candida
/*prefer_evict=*/random_context.randbool(),
/*m_is_local=*/random_context.randbool(),
/*m_network=*/ALL_NETWORKS[random_context.randrange(ALL_NETWORKS.size())],
+ /*m_noban=*/false,
+ /*m_conn_type=*/ConnectionType::INBOUND,
});
}
return candidates;
diff --git a/src/test/util/net.h b/src/test/util/net.h
index c5dbaeca3e..34ab9958c6 100644
--- a/src/test/util/net.h
+++ b/src/test/util/net.h
@@ -6,6 +6,7 @@
#define BITCOIN_TEST_UTIL_NET_H
#include <compat.h>
+#include <node/eviction.h>
#include <netaddress.h>
#include <net.h>
#include <util/sock.h>
diff --git a/src/util/error.cpp b/src/util/error.cpp
index af8cbd0353..22a5964279 100644
--- a/src/util/error.cpp
+++ b/src/util/error.cpp
@@ -35,6 +35,8 @@ bilingual_str TransactionErrorString(const TransactionError err)
return Untranslated("External signer not found");
case TransactionError::EXTERNAL_SIGNER_FAILED:
return Untranslated("External signer failed to sign");
+ case TransactionError::INVALID_PACKAGE:
+ return Untranslated("Transaction rejected due to invalid package");
// no default case, so the compiler can warn about missing cases
}
assert(false);
diff --git a/src/util/error.h b/src/util/error.h
index 4cc35eb1fd..0429de651a 100644
--- a/src/util/error.h
+++ b/src/util/error.h
@@ -32,6 +32,7 @@ enum class TransactionError {
MAX_FEE_EXCEEDED,
EXTERNAL_SIGNER_NOT_FOUND,
EXTERNAL_SIGNER_FAILED,
+ INVALID_PACKAGE,
};
bilingual_str TransactionErrorString(const TransactionError error);
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index f8230f7a1d..dbd768a758 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -315,12 +315,6 @@ BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const bool read_only, b
env = database.env.get();
pdb = database.m_db.get();
strFile = fs::PathToString(database.m_filename);
- if (!Exists(std::string("version"))) {
- bool fTmp = fReadOnly;
- fReadOnly = false;
- Write(std::string("version"), CLIENT_VERSION);
- fReadOnly = fTmp;
- }
}
void BerkeleyDatabase::Open()
diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp
index 81055a5a1a..1d22d0993e 100644
--- a/src/wallet/spend.cpp
+++ b/src/wallet/spend.cpp
@@ -397,10 +397,13 @@ std::optional<SelectionResult> AttemptSelection(const CWallet& wallet, const CAm
// The knapsack solver has some legacy behavior where it will spend dust outputs. We retain this behavior, so don't filter for positive only here.
std::vector<OutputGroup> all_groups = GroupOutputs(wallet, coins, coin_selection_params, eligibility_filter, false /* positive_only */);
+ CAmount target_with_change = nTargetValue;
// While nTargetValue includes the transaction fees for non-input things, it does not include the fee for creating a change output.
- // So we need to include that for KnapsackSolver as well, as we are expecting to create a change output.
- if (auto knapsack_result{KnapsackSolver(all_groups, nTargetValue + coin_selection_params.m_change_fee,
- coin_selection_params.m_min_change_target, coin_selection_params.rng_fast)}) {
+ // So we need to include that for KnapsackSolver and SRD as well, as we are expecting to create a change output.
+ if (!coin_selection_params.m_subtract_fee_outputs) {
+ target_with_change += coin_selection_params.m_change_fee;
+ }
+ if (auto knapsack_result{KnapsackSolver(all_groups, target_with_change, coin_selection_params.m_min_change_target, coin_selection_params.rng_fast)}) {
knapsack_result->ComputeAndSetWaste(coin_selection_params.m_cost_of_change);
results.push_back(*knapsack_result);
}
@@ -409,7 +412,7 @@ std::optional<SelectionResult> AttemptSelection(const CWallet& wallet, const CAm
// barely meets the target. Just use the lower bound change target instead of the randomly
// generated one, since SRD will result in a random change amount anyway; avoid making the
// target needlessly large.
- const CAmount srd_target = nTargetValue + coin_selection_params.m_change_fee + CHANGE_LOWER;
+ const CAmount srd_target = target_with_change + CHANGE_LOWER;
if (auto srd_result{SelectCoinsSRD(positive_groups, srd_target, coin_selection_params.rng_fast)}) {
srd_result->ComputeAndSetWaste(coin_selection_params.m_cost_of_change);
results.push_back(*srd_result);
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index a04446c840..8afd3f416d 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -883,12 +883,10 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
if (result != DBErrors::LOAD_OK)
return result;
- // Last client version to open this wallet, was previously the file version number
+ // Last client version to open this wallet
int last_client = CLIENT_VERSION;
- m_batch->Read(DBKeys::VERSION, last_client);
-
- int wallet_version = pwallet->GetVersion();
- pwallet->WalletLogPrintf("Wallet File Version = %d\n", wallet_version > 0 ? wallet_version : last_client);
+ bool has_last_client = m_batch->Read(DBKeys::VERSION, last_client);
+ pwallet->WalletLogPrintf("Wallet file version = %d, last client version = %d\n", pwallet->GetVersion(), last_client);
pwallet->WalletLogPrintf("Keys: %u plaintext, %u encrypted, %u w/ metadata, %u total. Unknown wallet records: %u\n",
wss.nKeys, wss.nCKeys, wss.nKeyMeta, wss.nKeys + wss.nCKeys, wss.m_unknown_records);
@@ -909,7 +907,7 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
if (wss.fIsEncrypted && (last_client == 40000 || last_client == 50000))
return DBErrors::NEED_REWRITE;
- if (last_client < CLIENT_VERSION) // Update
+ if (!has_last_client || last_client != CLIENT_VERSION) // Update
m_batch->Write(DBKeys::VERSION, CLIENT_VERSION);
if (wss.fAnyUnordered)