Diffstat (limited to 'src')
-rw-r--r--   src/addrman.h                         21
-rw-r--r--   src/consensus/params.h                 4
-rw-r--r--   src/deploymentstatus.cpp              17
-rw-r--r--   src/init.cpp                           4
-rw-r--r--   src/net_processing.cpp                13
-rw-r--r--   src/net_processing.h                   5
-rw-r--r--   src/node/transaction.cpp               2
-rw-r--r--   src/test/denialofservice_tests.cpp     8
-rw-r--r--   src/test/util/setup_common.cpp         2
9 files changed, 52 insertions, 24 deletions
diff --git a/src/addrman.h b/src/addrman.h
index 4d8d05a99a..736d9783e6 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -104,19 +104,23 @@ public:
* * Make sure no (localized) attacker can fill the entire table with his nodes/addresses.
*
* To that end:
- * * Addresses are organized into buckets.
- * * Addresses that have not yet been tried go into 1024 "new" buckets.
- * * Based on the address range (/16 for IPv4) of the source of information, 64 buckets are selected at random.
+ * * Addresses are organized into buckets that can each store up to 64 entries.
+ * * Addresses to which our node has not successfully connected go into 1024 "new" buckets.
+ * * Based on the address range (/16 for IPv4) of the source of information, or if an asmap is provided,
+ * the AS it belongs to (for IPv4/IPv6), 64 buckets are selected at random.
* * The actual bucket is chosen from one of these, based on the range in which the address itself is located.
+ * * The position in the bucket is chosen based on the full address.
* * One single address can occur in up to 8 different buckets to increase selection chances for addresses that
* are seen frequently. The chance for increasing this multiplicity decreases exponentially.
- * * When adding a new address to a full bucket, a randomly chosen entry (with a bias favoring less recently seen
- * ones) is removed from it first.
+ * * When adding a new address to an occupied position of a bucket, it will not replace the existing entry
+ * unless that address is also stored in another bucket or it doesn't meet one of several quality criteria
+ * (see IsTerrible for exact criteria).
* * Addresses of nodes that are known to be accessible go into 256 "tried" buckets.
* * Each address range selects at random 8 of these buckets.
* * The actual bucket is chosen from one of these, based on the full address.
- * * When adding a new good address to a full bucket, a randomly chosen entry (with a bias favoring less recently
- * tried ones) is evicted from it, back to the "new" buckets.
+ * * When adding a new good address to an occupied position of a bucket, a FEELER connection to the
+ * old address is attempted. The old entry is only replaced and moved back to the "new" buckets if this
+ * attempt was unsuccessful.
* * Bucket selection is based on cryptographic hashing, using a randomly-generated 256-bit key, which should not
* be observable by adversaries.
* * Several indexes are kept for high performance. Defining DEBUG_ADDRMAN will introduce frequent (and expensive)
@@ -696,8 +700,7 @@ private:
//! Find an entry.
CAddrInfo* Find(const CNetAddr& addr, int *pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs);
- //! find an entry, creating it if necessary.
- //! nTime and nServices of the found node are updated, if necessary.
+ //! Create a new entry and add it to the internal data structures mapInfo, mapAddr and vRandom.
CAddrInfo* Create(const CAddress &addr, const CNetAddr &addrSource, int *pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs);
//! Swap two elements in vRandom.
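
The bucket-selection scheme documented in the updated addrman.h comment above can be illustrated with a small self-contained sketch. This is not the actual CAddrMan code: KeyedHash, addr_group and source_group are hypothetical stand-ins for the keyed hashing and netgroup extraction the real implementation performs, while the constants (1024 "new" buckets, 64 candidate buckets per source group, 64 entries per bucket) follow the comment.

    #include <cstdint>
    #include <functional>
    #include <string>

    // Hypothetical stand-in for the keyed cryptographic hash; the real code mixes
    // in a secret 256-bit key so adversaries cannot predict bucket placement.
    static uint64_t KeyedHash(uint64_t key, const std::string& data)
    {
        return std::hash<std::string>{}(std::to_string(key) + data);
    }

    // Simplified new-bucket selection mirroring the scheme in the comment: the
    // source group picks one of 64 candidate buckets, and the address's own
    // group decides which candidate is used.
    int GetNewBucketSketch(uint64_t key, const std::string& addr_group, const std::string& source_group)
    {
        constexpr int NEW_BUCKET_COUNT = 1024;
        constexpr int BUCKETS_PER_SOURCE_GROUP = 64;
        const uint64_t hash1 = KeyedHash(key, addr_group + source_group);
        const uint64_t hash2 = KeyedHash(key, source_group + std::to_string(hash1 % BUCKETS_PER_SOURCE_GROUP));
        return static_cast<int>(hash2 % NEW_BUCKET_COUNT);
    }

    // The position inside the chosen bucket is derived from the full address, so
    // each address maps to a deterministic (bucket, position) pair.
    int GetBucketPositionSketch(uint64_t key, int bucket, const std::string& full_addr)
    {
        constexpr int BUCKET_SIZE = 64;
        return static_cast<int>(KeyedHash(key, std::to_string(bucket) + full_addr) % BUCKET_SIZE);
    }

Collisions at an occupied position are then resolved as the comment describes: in the "new" table the incumbent is only replaced if it also lives in another bucket or fails the IsTerrible() quality checks, and in the "tried" table only after a failed FEELER connection to the old address.
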
diff --git a/src/consensus/params.h b/src/consensus/params.h
index 9205cfee87..77bf7fd0d8 100644
--- a/src/consensus/params.h
+++ b/src/consensus/params.h
@@ -23,7 +23,7 @@ enum BuriedDeployment : int16_t {
DEPLOYMENT_CSV,
DEPLOYMENT_SEGWIT,
};
-constexpr bool ValidDeployment(BuriedDeployment dep) { return DEPLOYMENT_HEIGHTINCB <= dep && dep <= DEPLOYMENT_SEGWIT; }
+constexpr bool ValidDeployment(BuriedDeployment dep) { return dep <= DEPLOYMENT_SEGWIT; }
enum DeploymentPos : uint16_t {
DEPLOYMENT_TESTDUMMY,
@@ -31,7 +31,7 @@ enum DeploymentPos : uint16_t {
// NOTE: Also add new deployments to VersionBitsDeploymentInfo in deploymentinfo.cpp
MAX_VERSION_BITS_DEPLOYMENTS
};
-constexpr bool ValidDeployment(DeploymentPos dep) { return DEPLOYMENT_TESTDUMMY <= dep && dep <= DEPLOYMENT_TAPROOT; }
+constexpr bool ValidDeployment(DeploymentPos dep) { return dep < MAX_VERSION_BITS_DEPLOYMENTS; }
/**
* Struct for each individual consensus rule change using BIP9.
diff --git a/src/deploymentstatus.cpp b/src/deploymentstatus.cpp
index 9007800421..bba86639a3 100644
--- a/src/deploymentstatus.cpp
+++ b/src/deploymentstatus.cpp
@@ -7,6 +7,8 @@
#include <consensus/params.h>
#include <versionbits.h>
+#include <type_traits>
+
VersionBitsCache g_versionbitscache;
/* Basic sanity checking for BuriedDeployment/DeploymentPos enums and
@@ -15,3 +17,18 @@ VersionBitsCache g_versionbitscache;
static_assert(ValidDeployment(Consensus::DEPLOYMENT_TESTDUMMY), "sanity check of DeploymentPos failed (TESTDUMMY not valid)");
static_assert(!ValidDeployment(Consensus::MAX_VERSION_BITS_DEPLOYMENTS), "sanity check of DeploymentPos failed (MAX value considered valid)");
static_assert(!ValidDeployment(static_cast<Consensus::BuriedDeployment>(Consensus::DEPLOYMENT_TESTDUMMY)), "sanity check of BuriedDeployment failed (overlaps with DeploymentPos)");
+
+/* ValidDeployment only checks upper bounds for ensuring validity.
+ * This checks that the lowest possible value of the type is also a
+ * (specific) valid deployment so that lower bounds don't need to be checked.
+ */
+
+template<typename T, T x>
+static constexpr bool is_minimum()
+{
+ using U = typename std::underlying_type<T>::type;
+ return x == std::numeric_limits<U>::min();
+}
+
+static_assert(is_minimum<Consensus::BuriedDeployment, Consensus::DEPLOYMENT_HEIGHTINCB>(), "heightincb is not minimum value for BuriedDeployment");
+static_assert(is_minimum<Consensus::DeploymentPos, Consensus::DEPLOYMENT_TESTDUMMY>(), "testdummy is not minimum value for DeploymentPos");
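
The reasoning behind dropping the lower-bound comparison can be sketched independently of the Bitcoin Core headers. Assuming an enum whose first value is pinned to the minimum of its underlying type, as DEPLOYMENT_HEIGHTINCB is pinned to the minimum of int16_t in consensus/params.h, no representable value can sit below it, so a one-sided range check plus a compile-time minimum check is equivalent to the previous two-sided check. ToyDeployment below is a hypothetical enum used only for illustration:

    #include <cstdint>
    #include <limits>
    #include <type_traits>

    // Toy enum mirroring the BuriedDeployment layout: the first enumerator is
    // pinned to the minimum of the underlying type.
    enum ToyDeployment : int16_t {
        TOY_FIRST = std::numeric_limits<int16_t>::min(),
        TOY_SECOND,
        TOY_LAST,
    };

    // One-sided validity check, in the style of the patched ValidDeployment().
    constexpr bool ToyValid(ToyDeployment dep) { return dep <= TOY_LAST; }

    // Compile-time proof that the lower bound cannot be violated: the smallest
    // representable value of the underlying type is itself a valid deployment.
    template <typename T, T x>
    constexpr bool is_minimum()
    {
        using U = typename std::underlying_type<T>::type;
        return x == std::numeric_limits<U>::min();
    }

    static_assert(is_minimum<ToyDeployment, TOY_FIRST>(), "TOY_FIRST must be the minimum");
    static_assert(ToyValid(TOY_FIRST) && ToyValid(TOY_LAST), "both bounds covered by one comparison");
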
diff --git a/src/init.cpp b/src/init.cpp
index aee8b78999..1b406bed28 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1185,7 +1185,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
assert(!node.peerman);
node.peerman = PeerManager::make(chainparams, *node.connman, *node.addrman, node.banman.get(),
- *node.scheduler, chainman, *node.mempool, ignores_incoming_txs);
+ chainman, *node.mempool, ignores_incoming_txs);
RegisterValidationInterface(node.peerman.get());
// sanitize comments per BIP-0014, format user agent and check total size
@@ -1794,6 +1794,8 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
banman->DumpBanlist();
}, DUMP_BANS_INTERVAL);
+ if (node.peerman) node.peerman->StartScheduledTasks(*node.scheduler);
+
#if HAVE_SYSTEM
StartupNotify(args);
#endif
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 3a85e99a5f..9da2fe5d6f 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -292,7 +292,7 @@ class PeerManagerImpl final : public PeerManager
{
public:
PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
- BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman,
+ BanMan* banman, ChainstateManager& chainman,
CTxMemPool& pool, bool ignore_incoming_txs);
/** Overridden from CValidationInterface. */
@@ -309,6 +309,7 @@ public:
bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing);
/** Implement PeerManager */
+ void StartScheduledTasks(CScheduler& scheduler) override;
void CheckForStaleTipAndEvictPeers() override;
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override;
bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; }
@@ -1419,14 +1420,14 @@ bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
}
std::unique_ptr<PeerManager> PeerManager::make(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
- BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman,
+ BanMan* banman, ChainstateManager& chainman,
CTxMemPool& pool, bool ignore_incoming_txs)
{
- return std::make_unique<PeerManagerImpl>(chainparams, connman, addrman, banman, scheduler, chainman, pool, ignore_incoming_txs);
+ return std::make_unique<PeerManagerImpl>(chainparams, connman, addrman, banman, chainman, pool, ignore_incoming_txs);
}
PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
- BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman,
+ BanMan* banman, ChainstateManager& chainman,
CTxMemPool& pool, bool ignore_incoming_txs)
: m_chainparams(chainparams),
m_connman(connman),
@@ -1436,6 +1437,10 @@ PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& conn
m_mempool(pool),
m_ignore_incoming_txs(ignore_incoming_txs)
{
+}
+
+void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
+{
// Stale tip checking and peer eviction are on two different timers, but we
// don't want them to get out of sync due to drift in the scheduler, so we
// combine them in one function and schedule at the quicker (peer-eviction)
diff --git a/src/net_processing.h b/src/net_processing.h
index 4532a0505e..9d8d788583 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -38,10 +38,13 @@ class PeerManager : public CValidationInterface, public NetEventsInterface
{
public:
static std::unique_ptr<PeerManager> make(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
- BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman,
+ BanMan* banman, ChainstateManager& chainman,
CTxMemPool& pool, bool ignore_incoming_txs);
virtual ~PeerManager() { }
+ /** Begin running background tasks; should only be called once */
+ virtual void StartScheduledTasks(CScheduler& scheduler) = 0;
+
/** Get statistics from node state */
virtual bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const = 0;
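
The change above separates construction from background-task registration: PeerManager::make() no longer needs a CScheduler, init.cpp opts in to the stale-tip/eviction timer by calling StartScheduledTasks() once the scheduler is available, and the unit tests simply never start the timer. Below is a minimal sketch of that shape, with Scheduler and Manager as hypothetical stand-ins for CScheduler and PeerManagerImpl (the interval is illustrative):

    #include <chrono>
    #include <functional>
    #include <vector>

    // Hypothetical stand-in for CScheduler: records repeating tasks.
    struct Scheduler {
        using Task = std::function<void()>;
        void scheduleEvery(Task t, std::chrono::milliseconds) { m_tasks.push_back(std::move(t)); }
        std::vector<Task> m_tasks;
    };

    // Hypothetical stand-in for PeerManagerImpl: construction does no scheduling.
    class Manager {
    public:
        Manager() = default;                    // cheap, scheduler-free construction
        void StartScheduledTasks(Scheduler& s)  // called at most once, only where a scheduler exists
        {
            s.scheduleEvery([this] { CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{45});
        }
        void CheckForStaleTipAndEvictPeers() { /* periodic maintenance */ }
    };

    int main()
    {
        Manager mgr;                     // tests can stop here and drive mgr directly
        Scheduler sched;
        mgr.StartScheduledTasks(sched);  // init.cpp-style callers opt in to background tasks
    }
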
diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp
index 1861755aff..2a7bcc057f 100644
--- a/src/node/transaction.cpp
+++ b/src/node/transaction.cpp
@@ -125,8 +125,6 @@ TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef t
CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock)
{
- LOCK(cs_main);
-
if (mempool && !block_index) {
CTransactionRef ptx = mempool->get(hash);
if (ptx) return ptx;
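
Removing the function-wide LOCK(cs_main) relies on the mempool lookup being protected by the mempool's own mutex, so a broad lock is only needed where chain state is actually read. The sketch below shows that lock-narrowing pattern in general terms; Pool, Lookup and g_chain_cs are hypothetical names, not the patched GetTransaction() itself:

    #include <mutex>
    #include <optional>
    #include <string>
    #include <unordered_map>

    // Hypothetical component with its own internal lock, analogous to
    // CTxMemPool::get() taking the mempool's cs instead of relying on cs_main.
    class Pool {
    public:
        std::optional<std::string> get(int id) const
        {
            std::lock_guard<std::mutex> lock(m_cs);  // component-level locking
            auto it = m_entries.find(id);
            if (it == m_entries.end()) return std::nullopt;
            return it->second;
        }
    private:
        mutable std::mutex m_cs;
        std::unordered_map<int, std::string> m_entries;
    };

    std::mutex g_chain_cs;  // hypothetical stand-in for a broad lock like cs_main

    // Fast path needs no broad lock; only the chain-state path acquires it.
    std::optional<std::string> Lookup(const Pool& pool, int id, bool check_chain)
    {
        if (auto tx = pool.get(id)) return tx;
        if (check_chain) {
            std::lock_guard<std::mutex> lock(g_chain_cs);
            // ... read chain state under the broad lock ...
        }
        return std::nullopt;
    }
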
diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp
index 5668ead1fb..0bfe6eecd9 100644
--- a/src/test/denialofservice_tests.cpp
+++ b/src/test/denialofservice_tests.cpp
@@ -53,7 +53,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
const CChainParams& chainparams = Params();
auto connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman);
auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, nullptr,
- *m_node.scheduler, *m_node.chainman, *m_node.mempool, false);
+ *m_node.chainman, *m_node.mempool, false);
// Mock an outbound peer
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
@@ -121,7 +121,7 @@ BOOST_AUTO_TEST_CASE(stale_tip_peer_management)
const CChainParams& chainparams = Params();
auto connman = std::make_unique<ConnmanTestMsg>(0x1337, 0x1337, *m_node.addrman);
auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, nullptr,
- *m_node.scheduler, *m_node.chainman, *m_node.mempool, false);
+ *m_node.chainman, *m_node.mempool, false);
constexpr int max_outbound_full_relay = MAX_OUTBOUND_FULL_RELAY_CONNECTIONS;
CConnman::Options options;
@@ -194,7 +194,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
auto banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = std::make_unique<ConnmanTestMsg>(0x1337, 0x1337, *m_node.addrman);
auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, banman.get(),
- *m_node.scheduler, *m_node.chainman, *m_node.mempool, false);
+ *m_node.chainman, *m_node.mempool, false);
CNetAddr tor_netaddr;
BOOST_REQUIRE(
@@ -288,7 +288,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
auto banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman);
auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, banman.get(),
- *m_node.scheduler, *m_node.chainman, *m_node.mempool, false);
+ *m_node.chainman, *m_node.mempool, false);
banman->ClearBanned();
int64_t nStartTime = GetTime();
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 5334c4623e..2d044af184 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -197,7 +197,7 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const
m_node.banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME);
m_node.connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman); // Deterministic randomness for tests.
m_node.peerman = PeerManager::make(chainparams, *m_node.connman, *m_node.addrman,
- m_node.banman.get(), *m_node.scheduler, *m_node.chainman,
+ m_node.banman.get(), *m_node.chainman,
*m_node.mempool, false);
{
CConnman::Options options;