-rw-r--r--  doc/release-notes-19988.md                                    |   9
-rw-r--r--  src/Makefile.am                                               |   3
-rw-r--r--  src/Makefile.test.include                                     |   9
-rw-r--r--  src/limitedmap.h                                              | 100
-rw-r--r--  src/net.h                                                     |   1
-rw-r--r--  src/net_permissions.cpp                                       |   2
-rw-r--r--  src/net_permissions.h                                         |   1
-rw-r--r--  src/net_processing.cpp                                        | 315
-rw-r--r--  src/net_processing.h                                          |   8
-rw-r--r--  src/primitives/transaction.h                                  |   4
-rw-r--r--  src/secp256k1/.travis.yml                                     |  20
-rw-r--r--  src/secp256k1/README.md                                       |   2
-rw-r--r--  src/secp256k1/configure.ac                                    |  31
-rwxr-xr-x  src/secp256k1/contrib/travis.sh                               |  15
-rw-r--r--  src/secp256k1/sage/gen_exhaustive_groups.sage                 | 129
-rw-r--r--  src/secp256k1/src/assumptions.h                               |   8
-rw-r--r--  src/secp256k1/src/basic-config.h                              |   1
-rw-r--r--  src/secp256k1/src/bench_internal.c                            |   4
-rw-r--r--  src/secp256k1/src/ecmult.h                                    |   2
-rw-r--r--  src/secp256k1/src/ecmult_const_impl.h                         |  20
-rw-r--r--  src/secp256k1/src/ecmult_impl.h                               | 157
-rw-r--r--  src/secp256k1/src/group.h                                     |  14
-rw-r--r--  src/secp256k1/src/group_impl.h                                | 118
-rw-r--r--  src/secp256k1/src/modules/ecdh/tests_impl.h                   |   4
-rw-r--r--  src/secp256k1/src/modules/extrakeys/Makefile.am.include       |   1
-rw-r--r--  src/secp256k1/src/modules/extrakeys/main_impl.h               |   5
-rw-r--r--  src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h   |  68
-rw-r--r--  src/secp256k1/src/modules/extrakeys/tests_impl.h              |  96
-rw-r--r--  src/secp256k1/src/modules/recovery/Makefile.am.include        |   1
-rw-r--r--  src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h   | 149
-rw-r--r--  src/secp256k1/src/modules/recovery/tests_impl.h               |  10
-rw-r--r--  src/secp256k1/src/modules/schnorrsig/Makefile.am.include      |   1
-rw-r--r--  src/secp256k1/src/modules/schnorrsig/main_impl.h              |  39
-rw-r--r--  src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h | 206
-rw-r--r--  src/secp256k1/src/modules/schnorrsig/tests_impl.h             |  52
-rw-r--r--  src/secp256k1/src/scalar.h                                    |  11
-rw-r--r--  src/secp256k1/src/scalar_4x64_impl.h                          |  12
-rw-r--r--  src/secp256k1/src/scalar_8x32_impl.h                          |  20
-rw-r--r--  src/secp256k1/src/scalar_impl.h                               | 250
-rw-r--r--  src/secp256k1/src/scalar_low_impl.h                           |  11
-rw-r--r--  src/secp256k1/src/scratch_impl.h                              |  10
-rw-r--r--  src/secp256k1/src/secp256k1.c                                 |   3
-rw-r--r--  src/secp256k1/src/selftest.h                                  |   2
-rw-r--r--  src/secp256k1/src/testrand.h                                  |  22
-rw-r--r--  src/secp256k1/src/testrand_impl.h                             |  72
-rw-r--r--  src/secp256k1/src/tests.c                                     | 555
-rw-r--r--  src/secp256k1/src/tests_exhaustive.c                          | 374
-rw-r--r--  src/secp256k1/src/util.h                                      |  18
-rw-r--r--  src/secp256k1/src/valgrind_ctime_test.c                       |  20
-rw-r--r--  src/test/fuzz/txrequest.cpp                                   | 374
-rw-r--r--  src/test/limitedmap_tests.cpp                                 | 101
-rw-r--r--  src/test/txrequest_tests.cpp                                  | 738
-rw-r--r--  src/txrequest.cpp                                             | 748
-rw-r--r--  src/txrequest.h                                               | 211
-rw-r--r--  src/uint256.cpp                                               |   1
-rw-r--r--  src/uint256.h                                                 |   1
-rwxr-xr-x  test/functional/p2p_tx_download.py                            | 123
 57 files changed, 3886 insertions(+), 1396 deletions(-)
diff --git a/doc/release-notes-19988.md b/doc/release-notes-19988.md
new file mode 100644
index 0000000000..ef26eb3032
--- /dev/null
+++ b/doc/release-notes-19988.md
@@ -0,0 +1,9 @@
+P2P changes
+-----------
+
+The size of the set of transactions that peers have announced and that we
+consider for requests has been reduced from 100000 to 5000 (per peer); further
+announcements are ignored once that limit is reached. If you need to dump
+(very) large batches of transactions, an exception can be made for trusted
+peers using the "relay" network permission. For localhost, for example, it can
+be enabled with the command-line option `-whitelist=relay@127.0.0.1`.
diff --git a/src/Makefile.am b/src/Makefile.am
index e359ae2cb2..b0d36717ce 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -151,7 +151,6 @@ BITCOIN_CORE_H = \
interfaces/wallet.h \
key.h \
key_io.h \
- limitedmap.h \
logging.h \
logging/timer.h \
memusage.h \
@@ -215,6 +214,7 @@ BITCOIN_CORE_H = \
timedata.h \
torcontrol.h \
txdb.h \
+ txrequest.h \
txmempool.h \
undo.h \
util/asmap.h \
@@ -328,6 +328,7 @@ libbitcoin_server_a_SOURCES = \
timedata.cpp \
torcontrol.cpp \
txdb.cpp \
+ txrequest.cpp \
txmempool.cpp \
validation.cpp \
validationinterface.cpp \
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index c6a4a4edc4..835b12ce3e 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -151,6 +151,7 @@ FUZZ_TARGETS = \
test/fuzz/tx_in_deserialize \
test/fuzz/tx_out \
test/fuzz/txoutcompressor_deserialize \
+ test/fuzz/txrequest \
test/fuzz/txundo_deserialize \
test/fuzz/uint160_deserialize \
test/fuzz/uint256_deserialize
@@ -235,7 +236,6 @@ BITCOIN_TESTS =\
test/interfaces_tests.cpp \
test/key_io_tests.cpp \
test/key_tests.cpp \
- test/limitedmap_tests.cpp \
test/logging_tests.cpp \
test/dbwrapper_tests.cpp \
test/validation_tests.cpp \
@@ -275,6 +275,7 @@ BITCOIN_TESTS =\
test/torcontrol_tests.cpp \
test/transaction_tests.cpp \
test/txindex_tests.cpp \
+ test/txrequest_tests.cpp \
test/txvalidation_tests.cpp \
test/txvalidationcache_tests.cpp \
test/uint256_tests.cpp \
@@ -1214,6 +1215,12 @@ test_fuzz_txoutcompressor_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_txoutcompressor_deserialize_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON)
test_fuzz_txoutcompressor_deserialize_SOURCES = test/fuzz/deserialize.cpp
+test_fuzz_txrequest_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_txrequest_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_txrequest_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_txrequest_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON)
+test_fuzz_txrequest_SOURCES = test/fuzz/txrequest.cpp
+
test_fuzz_txundo_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DTXUNDO_DESERIALIZE=1
test_fuzz_txundo_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_txundo_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
diff --git a/src/limitedmap.h b/src/limitedmap.h
deleted file mode 100644
index 7d66964e36..0000000000
--- a/src/limitedmap.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (c) 2012-2018 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef BITCOIN_LIMITEDMAP_H
-#define BITCOIN_LIMITEDMAP_H
-
-#include <assert.h>
-#include <map>
-
-/** STL-like map container that only keeps the N elements with the highest value. */
-template <typename K, typename V>
-class limitedmap
-{
-public:
- typedef K key_type;
- typedef V mapped_type;
- typedef std::pair<const key_type, mapped_type> value_type;
- typedef typename std::map<K, V>::const_iterator const_iterator;
- typedef typename std::map<K, V>::size_type size_type;
-
-protected:
- std::map<K, V> map;
- typedef typename std::map<K, V>::iterator iterator;
- std::multimap<V, iterator> rmap;
- typedef typename std::multimap<V, iterator>::iterator rmap_iterator;
- size_type nMaxSize;
-
-public:
- explicit limitedmap(size_type nMaxSizeIn)
- {
- assert(nMaxSizeIn > 0);
- nMaxSize = nMaxSizeIn;
- }
- const_iterator begin() const { return map.begin(); }
- const_iterator end() const { return map.end(); }
- size_type size() const { return map.size(); }
- bool empty() const { return map.empty(); }
- const_iterator find(const key_type& k) const { return map.find(k); }
- size_type count(const key_type& k) const { return map.count(k); }
- void insert(const value_type& x)
- {
- std::pair<iterator, bool> ret = map.insert(x);
- if (ret.second) {
- if (map.size() > nMaxSize) {
- map.erase(rmap.begin()->second);
- rmap.erase(rmap.begin());
- }
- rmap.insert(make_pair(x.second, ret.first));
- }
- }
- void erase(const key_type& k)
- {
- iterator itTarget = map.find(k);
- if (itTarget == map.end())
- return;
- std::pair<rmap_iterator, rmap_iterator> itPair = rmap.equal_range(itTarget->second);
- for (rmap_iterator it = itPair.first; it != itPair.second; ++it)
- if (it->second == itTarget) {
- rmap.erase(it);
- map.erase(itTarget);
- return;
- }
- // Shouldn't ever get here
- assert(0);
- }
- void update(const_iterator itIn, const mapped_type& v)
- {
- // Using map::erase() with empty range instead of map::find() to get a non-const iterator,
- // since it is a constant time operation in C++11. For more details, see
- // https://stackoverflow.com/questions/765148/how-to-remove-constness-of-const-iterator
- iterator itTarget = map.erase(itIn, itIn);
-
- if (itTarget == map.end())
- return;
- std::pair<rmap_iterator, rmap_iterator> itPair = rmap.equal_range(itTarget->second);
- for (rmap_iterator it = itPair.first; it != itPair.second; ++it)
- if (it->second == itTarget) {
- rmap.erase(it);
- itTarget->second = v;
- rmap.insert(make_pair(v, itTarget));
- return;
- }
- // Shouldn't ever get here
- assert(0);
- }
- size_type max_size() const { return nMaxSize; }
- size_type max_size(size_type s)
- {
- assert(s > 0);
- while (map.size() > s) {
- map.erase(rmap.begin()->second);
- rmap.erase(rmap.begin());
- }
- nMaxSize = s;
- return nMaxSize;
- }
-};
-
-#endif // BITCOIN_LIMITEDMAP_H
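
For context on the removal above: limitedmap kept only the nMaxSize entries with the highest mapped value, evicting the lowest-valued entry on overflow. Its only user was g_already_asked_for (see net_processing.cpp below), which TxRequestTracker now replaces. A minimal sketch of the semantics, assuming the deleted header were still available (hypothetical usage, not code from the tree):

    #include <limitedmap.h>

    #include <cassert>
    #include <string>

    void LimitedMapSketch()
    {
        limitedmap<std::string, int> m(2);  // keep at most the 2 highest-valued entries
        m.insert({"a", 1});
        m.insert({"b", 2});
        m.insert({"c", 3});                 // overflow: evicts "a", the lowest value
        assert(m.count("a") == 0 && m.count("b") == 1 && m.count("c") == 1);
        m.update(m.find("b"), 0);           // lowering a value makes it the next eviction candidate
    }
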
diff --git a/src/net.h b/src/net.h
index 7c63516394..4ba872d02c 100644
--- a/src/net.h
+++ b/src/net.h
@@ -14,7 +14,6 @@
#include <compat.h>
#include <crypto/siphash.h>
#include <hash.h>
-#include <limitedmap.h>
#include <net_permissions.h>
#include <netaddress.h>
#include <optional.h>
diff --git a/src/net_permissions.cpp b/src/net_permissions.cpp
index 53648deb40..d40fdfb113 100644
--- a/src/net_permissions.cpp
+++ b/src/net_permissions.cpp
@@ -12,7 +12,7 @@ const std::vector<std::string> NET_PERMISSIONS_DOC{
"bloomfilter (allow requesting BIP37 filtered blocks and transactions)",
"noban (do not ban for misbehavior; implies download)",
"forcerelay (relay transactions that are already in the mempool; implies relay)",
- "relay (relay even in -blocksonly mode)",
+ "relay (relay even in -blocksonly mode, and unlimited transaction announcements)",
"mempool (allow requesting BIP35 mempool contents)",
"download (allow getheaders during IBD, no disconnect after maxuploadtarget limit)",
"addr (responses to GETADDR avoid hitting the cache and contain random records with the most up-to-date info)"
diff --git a/src/net_permissions.h b/src/net_permissions.h
index 5b68f635a7..bba0ea1695 100644
--- a/src/net_permissions.h
+++ b/src/net_permissions.h
@@ -19,6 +19,7 @@ enum NetPermissionFlags {
// Can query bloomfilter even if -peerbloomfilters is false
PF_BLOOMFILTER = (1U << 1),
// Relay and accept transactions from this peer, even if -blocksonly is true
+ // This peer is also not subject to limits on how many transaction INVs are tracked
PF_RELAY = (1U << 3),
// Always relay transactions from this peer, even if already in mempool
// Keep parameter interaction: forcerelay implies relay
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 9ad3f5d6f4..f14db379fb 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -72,22 +72,22 @@ static constexpr std::chrono::minutes PING_INTERVAL{2};
static const unsigned int MAX_LOCATOR_SZ = 101;
/** The maximum number of entries in an 'inv' protocol message */
static const unsigned int MAX_INV_SZ = 50000;
-/** Maximum number of in-flight transactions from a peer */
-static constexpr int32_t MAX_PEER_TX_IN_FLIGHT = 100;
-/** Maximum number of announced transactions from a peer */
-static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 2 * MAX_INV_SZ;
-/** How many microseconds to delay requesting transactions via txids, if we have wtxid-relaying peers */
-static constexpr std::chrono::microseconds TXID_RELAY_DELAY{std::chrono::seconds{2}};
-/** How many microseconds to delay requesting transactions from inbound peers */
-static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY{std::chrono::seconds{2}};
+/** Maximum number of in-flight transaction requests from a peer. This is not a hard limit, but the threshold
+ * at which OVERLOADED_PEER_TX_DELAY kicks in. */
+static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT = 100;
+/** Maximum number of transactions to consider for requesting, per peer. It provides a reasonable DoS limit to
+ * per-peer memory usage spent on announcements, while covering peers continuously sending INVs at the maximum
+ * rate (by our own policy, see INVENTORY_BROADCAST_PER_SECOND) for several minutes, while not receiving
+ * the actual transaction (from any peer) in response to requests for them. */
+static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 5000;
+/** How long to delay requesting transactions via txids, if we have wtxid-relaying peers */
+static constexpr auto TXID_RELAY_DELAY = std::chrono::seconds{2};
+/** How long to delay requesting transactions from non-preferred peers */
+static constexpr auto NONPREF_PEER_TX_DELAY = std::chrono::seconds{2};
+/** How long to delay requesting transactions from overloaded peers (see MAX_PEER_TX_REQUEST_IN_FLIGHT). */
+static constexpr auto OVERLOADED_PEER_TX_DELAY = std::chrono::seconds{2};
/** How long to wait (in microseconds) before downloading a transaction from an additional peer */
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{std::chrono::seconds{60}};
-/** Maximum delay (in microseconds) for transaction requests to avoid biasing some peers over others. */
-static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY{std::chrono::seconds{2}};
-/** How long to wait (in microseconds) before expiring an in-flight getdata request to a peer */
-static constexpr std::chrono::microseconds TX_EXPIRY_INTERVAL{GETDATA_TX_INTERVAL * 10};
-static_assert(INBOUND_PEER_TX_DELAY >= MAX_GETDATA_RANDOM_DELAY,
-"To preserve security, MAX_GETDATA_RANDOM_DELAY should not exceed INBOUND_PEER_DELAY");
/** Limit to avoid sending big packets. Not used in processing incoming GETDATA for compatibility */
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Number of blocks that can be requested at any given time from a single peer. */
@@ -375,69 +375,6 @@ struct CNodeState {
//! Time of last new block announcement
int64_t m_last_block_announcement;
- /*
- * State associated with transaction download.
- *
- * Tx download algorithm:
- *
- * When inv comes in, queue up (process_time, txid) inside the peer's
- * CNodeState (m_tx_process_time) as long as m_tx_announced for the peer
- * isn't too big (MAX_PEER_TX_ANNOUNCEMENTS).
- *
- * The process_time for a transaction is set to nNow for outbound peers,
- * nNow + 2 seconds for inbound peers. This is the time at which we'll
- * consider trying to request the transaction from the peer in
- * SendMessages(). The delay for inbound peers is to allow outbound peers
- * a chance to announce before we request from inbound peers, to prevent
- * an adversary from using inbound connections to blind us to a
- * transaction (InvBlock).
- *
- * When we call SendMessages() for a given peer,
- * we will loop over the transactions in m_tx_process_time, looking
- * at the transactions whose process_time <= nNow. We'll request each
- * such transaction that we don't have already and that hasn't been
- * requested from another peer recently, up until we hit the
- * MAX_PEER_TX_IN_FLIGHT limit for the peer. Then we'll update
- * g_already_asked_for for each requested txid, storing the time of the
- * GETDATA request. We use g_already_asked_for to coordinate transaction
- * requests amongst our peers.
- *
- * For transactions that we still need but we have already recently
- * requested from some other peer, we'll reinsert (process_time, txid)
- * back into the peer's m_tx_process_time at the point in the future at
- * which the most recent GETDATA request would time out (ie
- * GETDATA_TX_INTERVAL + the request time stored in g_already_asked_for).
- * We add an additional delay for inbound peers, again to prefer
- * attempting download from outbound peers first.
- * We also add an extra small random delay up to 2 seconds
- * to avoid biasing some peers over others. (e.g., due to fixed ordering
- * of peer processing in ThreadMessageHandler).
- *
- * When we receive a transaction from a peer, we remove the txid from the
- * peer's m_tx_in_flight set and from their recently announced set
- * (m_tx_announced). We also clear g_already_asked_for for that entry, so
- * that if somehow the transaction is not accepted but also not added to
- * the reject filter, then we will eventually redownload from other
- * peers.
- */
- struct TxDownloadState {
- /* Track when to attempt download of announced transactions (process
- * time in micros -> txid)
- */
- std::multimap<std::chrono::microseconds, GenTxid> m_tx_process_time;
-
- //! Store all the transactions a peer has recently announced
- std::set<uint256> m_tx_announced;
-
- //! Store transactions which were requested by us, with timestamp
- std::map<uint256, std::chrono::microseconds> m_tx_in_flight;
-
- //! Periodically check for stuck getdata requests
- std::chrono::microseconds m_check_expiry_timer{0};
- };
-
- TxDownloadState m_tx_download;
-
//! Whether this peer is an inbound connection
bool m_is_inbound;
@@ -478,9 +415,6 @@ struct CNodeState {
}
};
-// Keeps track of the time (in microseconds) when transactions were requested last time
-limitedmap<uint256, std::chrono::microseconds> g_already_asked_for GUARDED_BY(cs_main)(MAX_INV_SZ);
-
/** Map maintaining per-node state. */
static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);
@@ -817,73 +751,35 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec
}
}
-void EraseTxRequest(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
-{
- g_already_asked_for.erase(gtxid.GetHash());
-}
-
-std::chrono::microseconds GetTxRequestTime(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
-{
- auto it = g_already_asked_for.find(gtxid.GetHash());
- if (it != g_already_asked_for.end()) {
- return it->second;
- }
- return {};
-}
-
-void UpdateTxRequestTime(const GenTxid& gtxid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
-{
- auto it = g_already_asked_for.find(gtxid.GetHash());
- if (it == g_already_asked_for.end()) {
- g_already_asked_for.insert(std::make_pair(gtxid.GetHash(), request_time));
- } else {
- g_already_asked_for.update(it, request_time);
- }
-}
-
-std::chrono::microseconds CalculateTxGetDataTime(const GenTxid& gtxid, std::chrono::microseconds current_time, bool use_inbound_delay, bool use_txid_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
-{
- std::chrono::microseconds process_time;
- const auto last_request_time = GetTxRequestTime(gtxid);
- // First time requesting this tx
- if (last_request_time.count() == 0) {
- process_time = current_time;
- } else {
- // Randomize the delay to avoid biasing some peers over others (such as due to
- // fixed ordering of peer processing in ThreadMessageHandler)
- process_time = last_request_time + GETDATA_TX_INTERVAL + GetRandMicros(MAX_GETDATA_RANDOM_DELAY);
- }
-
- // We delay processing announcements from inbound peers
- if (use_inbound_delay) process_time += INBOUND_PEER_TX_DELAY;
-
- // We delay processing announcements from peers that use txid-relay (instead of wtxid)
- if (use_txid_delay) process_time += TXID_RELAY_DELAY;
-
- return process_time;
-}
+} // namespace
-void RequestTx(CNodeState* state, const GenTxid& gtxid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+void PeerManager::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
{
- CNodeState::TxDownloadState& peer_download_state = state->m_tx_download;
- if (peer_download_state.m_tx_announced.size() >= MAX_PEER_TX_ANNOUNCEMENTS ||
- peer_download_state.m_tx_process_time.size() >= MAX_PEER_TX_ANNOUNCEMENTS ||
- peer_download_state.m_tx_announced.count(gtxid.GetHash())) {
- // Too many queued announcements from this peer, or we already have
- // this announcement
+ AssertLockHeld(::cs_main); // For m_txrequest
+ NodeId nodeid = node.GetId();
+ if (!node.HasPermission(PF_RELAY) && m_txrequest.Count(nodeid) >= MAX_PEER_TX_ANNOUNCEMENTS) {
+ // Too many queued announcements from this peer
return;
}
- peer_download_state.m_tx_announced.insert(gtxid.GetHash());
-
- // Calculate the time to try requesting this transaction. Use
- // fPreferredDownload as a proxy for outbound peers.
- const auto process_time = CalculateTxGetDataTime(gtxid, current_time, !state->fPreferredDownload, !state->m_wtxid_relay && g_wtxid_relay_peers > 0);
-
- peer_download_state.m_tx_process_time.emplace(process_time, gtxid);
+ const CNodeState* state = State(nodeid);
+
+ // Decide the TxRequestTracker parameters for this announcement:
+ // - "preferred": if fPreferredDownload is set (= outbound, or PF_NOBAN permission)
+ // - "reqtime": current time plus delays for:
+ // - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections
+ // - TXID_RELAY_DELAY for txid announcements while wtxid peers are available
+ // - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least
+ // MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have PF_RELAY).
+ auto delay = std::chrono::microseconds{0};
+ const bool preferred = state->fPreferredDownload;
+ if (!preferred) delay += NONPREF_PEER_TX_DELAY;
+ if (!gtxid.IsWtxid() && g_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY;
+ const bool overloaded = !node.HasPermission(PF_RELAY) &&
+ m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT;
+ if (overloaded) delay += OVERLOADED_PEER_TX_DELAY;
+ m_txrequest.ReceivedInv(nodeid, gtxid, preferred, current_time + delay);
}
-} // namespace
-
// This function is used for testing the stale tip eviction logic, see
// denialofservice_tests.cpp
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
@@ -900,6 +796,7 @@ void PeerManager::InitializeNode(CNode *pnode) {
{
LOCK(cs_main);
mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, pnode->IsInboundConn(), pnode->IsManualConn()));
+ assert(m_txrequest.Count(nodeid) == 0);
}
{
PeerRef peer = std::make_shared<Peer>(nodeid);
@@ -957,6 +854,7 @@ void PeerManager::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
mapBlocksInFlight.erase(entry.hash);
}
EraseOrphansFor(nodeid);
+ m_txrequest.DisconnectedPeer(nodeid);
nPreferredDownload -= state->fPreferredDownload;
nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
assert(nPeersWithValidatedDownloads >= 0);
@@ -974,6 +872,7 @@ void PeerManager::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
assert(nPeersWithValidatedDownloads == 0);
assert(g_outbound_peers_with_protect_from_disconnect == 0);
assert(g_wtxid_relay_peers == 0);
+ assert(m_txrequest.Size() == 0);
}
LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
@@ -1286,7 +1185,8 @@ PeerManager::PeerManager(const CChainParams& chainparams, CConnman& connman, Ban
/**
* Evict orphan txn pool entries (EraseOrphanTx) based on a newly connected
- * block. Also save the time of the last tip update.
+ * block, remember the recently confirmed transactions, and delete tracked
+ * announcements for them. Also save the time of the last tip update.
*/
void PeerManager::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
{
@@ -1330,6 +1230,13 @@ void PeerManager::BlockConnected(const std::shared_ptr<const CBlock>& pblock, co
}
}
}
+ {
+ LOCK(cs_main);
+ for (const auto& ptx : pblock->vtx) {
+ m_txrequest.ForgetTxHash(ptx->GetHash());
+ m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
+ }
+ }
}
void PeerManager::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
@@ -2555,8 +2462,9 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
// At this point, the outgoing message serialization version can't change.
const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
- if (msg_type == NetMsgType::VERACK)
- {
+ if (msg_type == NetMsgType::VERACK) {
+ if (pfrom.fSuccessfullyConnected) return;
+
if (!pfrom.IsInboundConn()) {
// Mark this node as currently connected, so we update its timestamp later.
LOCK(cs_main);
@@ -2770,7 +2678,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
pfrom.fDisconnect = true;
return;
} else if (!fAlreadyHave && !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
- RequestTx(State(pfrom.GetId()), gtxid, current_time);
+ AddTxAnnouncement(pfrom, gtxid, current_time);
}
} else {
LogPrint(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId());
@@ -3024,11 +2932,8 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
TxValidationState state;
- for (const GenTxid& gtxid : {GenTxid(false, txid), GenTxid(true, wtxid)}) {
- nodestate->m_tx_download.m_tx_announced.erase(gtxid.GetHash());
- nodestate->m_tx_download.m_tx_in_flight.erase(gtxid.GetHash());
- EraseTxRequest(gtxid);
- }
+ m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
+ if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid);
std::list<CTransactionRef> lRemovedTxn;
@@ -3047,6 +2952,10 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
if (!AlreadyHaveTx(GenTxid(/* is_wtxid=*/true, wtxid), m_mempool) &&
AcceptToMemoryPool(m_mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */)) {
m_mempool.check(&::ChainstateActive().CoinsTip());
+ // As this version of the transaction was acceptable, we can forget about any
+ // requests for it.
+ m_txrequest.ForgetTxHash(tx.GetHash());
+ m_txrequest.ForgetTxHash(tx.GetWitnessHash());
RelayTransaction(tx.GetHash(), tx.GetWitnessHash(), m_connman);
for (unsigned int i = 0; i < tx.vout.size(); i++) {
auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(txid, i));
@@ -3102,10 +3011,14 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
// protocol for getting all unconfirmed parents.
const GenTxid gtxid{/* is_wtxid=*/false, parent_txid};
pfrom.AddKnownTx(parent_txid);
- if (!AlreadyHaveTx(gtxid, m_mempool)) RequestTx(State(pfrom.GetId()), gtxid, current_time);
+ if (!AlreadyHaveTx(gtxid, m_mempool)) AddTxAnnouncement(pfrom, gtxid, current_time);
}
AddOrphanTx(ptx, pfrom.GetId());
+ // Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore.
+ m_txrequest.ForgetTxHash(tx.GetHash());
+ m_txrequest.ForgetTxHash(tx.GetWitnessHash());
+
// DoS prevention: do not allow mapOrphanTransactions to grow unbounded (see CVE-2012-3789)
unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
@@ -3122,6 +3035,8 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
// from any of our non-wtxidrelay peers.
recentRejects->insert(tx.GetHash());
recentRejects->insert(tx.GetWitnessHash());
+ m_txrequest.ForgetTxHash(tx.GetHash());
+ m_txrequest.ForgetTxHash(tx.GetWitnessHash());
}
} else {
if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) {
@@ -3140,6 +3055,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
// if we start doing this too early.
assert(recentRejects);
recentRejects->insert(tx.GetWitnessHash());
+ m_txrequest.ForgetTxHash(tx.GetWitnessHash());
// If the transaction failed for TX_INPUTS_NOT_STANDARD,
// then we know that the witness was irrelevant to the policy
// failure, since this check depends only on the txid
@@ -3150,6 +3066,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
// parent-fetching by txid via the orphan-handling logic).
if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) {
recentRejects->insert(tx.GetHash());
+ m_txrequest.ForgetTxHash(tx.GetHash());
}
if (RecursiveDynamicUsage(*ptx) < 100000) {
AddToCompactExtraTransactions(ptx);
@@ -3790,24 +3707,15 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
}
if (msg_type == NetMsgType::NOTFOUND) {
- // Remove the NOTFOUND transactions from the peer
- LOCK(cs_main);
- CNodeState *state = State(pfrom.GetId());
std::vector<CInv> vInv;
vRecv >> vInv;
- if (vInv.size() <= MAX_PEER_TX_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ if (vInv.size() <= MAX_PEER_TX_ANNOUNCEMENTS + MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ LOCK(::cs_main);
for (CInv &inv : vInv) {
if (inv.IsGenTxMsg()) {
- // If we receive a NOTFOUND message for a txid we requested, erase
- // it from our data structures for this peer.
- auto in_flight_it = state->m_tx_download.m_tx_in_flight.find(inv.hash);
- if (in_flight_it == state->m_tx_download.m_tx_in_flight.end()) {
- // Skip any further work if this is a spurious NOTFOUND
- // message.
- continue;
- }
- state->m_tx_download.m_tx_in_flight.erase(in_flight_it);
- state->m_tx_download.m_tx_announced.erase(inv.hash);
+ // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as
+ // completed in TxRequestTracker.
+ m_txrequest.ReceivedResponse(pfrom.GetId(), inv.hash);
}
}
}
@@ -4582,67 +4490,26 @@ bool PeerManager::SendMessages(CNode* pto)
//
// Message: getdata (non-blocks)
//
-
- // For robustness, expire old requests after a long timeout, so that
- // we can resume downloading transactions from a peer even if they
- // were unresponsive in the past.
- // Eventually we should consider disconnecting peers, but this is
- // conservative.
- if (state.m_tx_download.m_check_expiry_timer <= current_time) {
- for (auto it=state.m_tx_download.m_tx_in_flight.begin(); it != state.m_tx_download.m_tx_in_flight.end();) {
- if (it->second <= current_time - TX_EXPIRY_INTERVAL) {
- LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n", it->first.ToString(), pto->GetId());
- state.m_tx_download.m_tx_announced.erase(it->first);
- state.m_tx_download.m_tx_in_flight.erase(it++);
- } else {
- ++it;
- }
- }
- // On average, we do this check every TX_EXPIRY_INTERVAL. Randomize
- // so that we're not doing this for all peers at the same time.
- state.m_tx_download.m_check_expiry_timer = current_time + TX_EXPIRY_INTERVAL / 2 + GetRandMicros(TX_EXPIRY_INTERVAL);
- }
-
- auto& tx_process_time = state.m_tx_download.m_tx_process_time;
- while (!tx_process_time.empty() && tx_process_time.begin()->first <= current_time && state.m_tx_download.m_tx_in_flight.size() < MAX_PEER_TX_IN_FLIGHT) {
- const GenTxid gtxid = tx_process_time.begin()->second;
- // Erase this entry from tx_process_time (it may be added back for
- // processing at a later time, see below)
- tx_process_time.erase(tx_process_time.begin());
- CInv inv(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash());
- if (!AlreadyHaveTx(ToGenTxid(inv), m_mempool)) {
- // If this transaction was last requested more than 1 minute ago,
- // then request.
- const auto last_request_time = GetTxRequestTime(gtxid);
- if (last_request_time <= current_time - GETDATA_TX_INTERVAL) {
- LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId());
- vGetData.push_back(inv);
- if (vGetData.size() >= MAX_GETDATA_SZ) {
- m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
- vGetData.clear();
- }
- UpdateTxRequestTime(gtxid, current_time);
- state.m_tx_download.m_tx_in_flight.emplace(gtxid.GetHash(), current_time);
- } else {
- // This transaction is in flight from someone else; queue
- // up processing to happen after the download times out
- // (with a slight delay for inbound peers, to prefer
- // requests to outbound peers).
- // Don't apply the txid-delay to re-requests of a
- // transaction; the heuristic of delaying requests to
- // txid-relay peers is to save bandwidth on initial
- // announcement of a transaction, and doesn't make sense
- // for a followup request if our first peer times out (and
- // would open us up to an attacker using inbound
- // wtxid-relay to prevent us from requesting transactions
- // from outbound txid-relay peers).
- const auto next_process_time = CalculateTxGetDataTime(gtxid, current_time, !state.fPreferredDownload, false);
- tx_process_time.emplace(next_process_time, gtxid);
+ std::vector<std::pair<NodeId, GenTxid>> expired;
+ auto requestable = m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
+ for (const auto& entry : expired) {
+ LogPrint(BCLog::NET, "timeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "wtx" : "tx",
+ entry.second.GetHash().ToString(), entry.first);
+ }
+ for (const GenTxid& gtxid : requestable) {
+ if (!AlreadyHaveTx(gtxid, m_mempool)) {
+ LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx",
+ gtxid.GetHash().ToString(), pto->GetId());
+ vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash());
+ if (vGetData.size() >= MAX_GETDATA_SZ) {
+ m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+ vGetData.clear();
}
+ m_txrequest.RequestedTx(pto->GetId(), gtxid.GetHash(), current_time + GETDATA_TX_INTERVAL);
} else {
- // We have already seen this transaction, no need to download.
- state.m_tx_download.m_tx_announced.erase(gtxid.GetHash());
- state.m_tx_download.m_tx_in_flight.erase(gtxid.GetHash());
+ // We have already seen this transaction, no need to download. This is just a belt-and-suspenders, as
+ // this should already be called whenever a transaction becomes AlreadyHaveTx().
+ m_txrequest.ForgetTxHash(gtxid.GetHash());
}
}
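
To make the new scheduling concrete, the sketch below (illustration only, not code from this patch) shows how AddTxAnnouncement composes the request delay from the constants defined at the top of this file. A txid announcement from a non-preferred, overloaded peer while wtxid-relay peers exist accumulates all three delays (6 seconds); a preferred wtxid-relay peer with spare in-flight capacity gets none:

    #include <chrono>

    std::chrono::microseconds AnnouncementDelaySketch(bool preferred, bool is_wtxid,
                                                      bool overloaded, int wtxid_relay_peers)
    {
        std::chrono::microseconds delay{0};
        if (!preferred) delay += std::chrono::seconds{2};  // NONPREF_PEER_TX_DELAY
        if (!is_wtxid && wtxid_relay_peers > 0) {
            delay += std::chrono::seconds{2};              // TXID_RELAY_DELAY
        }
        if (overloaded) delay += std::chrono::seconds{2};  // OVERLOADED_PEER_TX_DELAY
        return delay;                                      // reqtime = current_time + delay
    }
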
diff --git a/src/net_processing.h b/src/net_processing.h
index 946a5f4715..578660355a 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -9,6 +9,7 @@
#include <consensus/params.h>
#include <net.h>
#include <sync.h>
+#include <txrequest.h>
#include <validationinterface.h>
class BlockTransactionsRequest;
@@ -127,12 +128,19 @@ private:
void SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req);
+ /** Register with TxRequestTracker that an INV has been received from a
+ * peer. The announcement parameters are decided in PeerManager and then
+ * passed to TxRequestTracker. */
+ void AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
+ EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
const CChainParams& m_chainparams;
CConnman& m_connman;
/** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */
BanMan* const m_banman;
ChainstateManager& m_chainman;
CTxMemPool& m_mempool;
+ TxRequestTracker m_txrequest GUARDED_BY(::cs_main);
int64_t m_stale_tip_check_time; //!< Next time to check for stale tip
};
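
The m_txrequest member added here is driven through a small set of calls that appear throughout the net_processing.cpp hunks above. A rough lifecycle sketch (peer id, hash, and times are made up; the method names are the ones used in this patch):

    #include <txrequest.h>
    #include <uint256.h>

    #include <chrono>
    #include <utility>
    #include <vector>

    void TrackerLifecycleSketch()
    {
        TxRequestTracker tracker;
        const NodeId peer{7};              // hypothetical peer id
        const uint256 wtxid{};             // stand-in for a real wtxid
        std::chrono::microseconds now{0};  // stand-in for the current (mockable) time

        // INV received: schedule with the delay computed in AddTxAnnouncement.
        tracker.ReceivedInv(peer, GenTxid(/* is_wtxid= */ true, wtxid), /* preferred= */ true, now);

        // SendMessages: request whatever is due, and set the 60s expiry.
        std::vector<std::pair<NodeId, GenTxid>> expired;
        for (const GenTxid& req : tracker.GetRequestable(peer, now, &expired)) {
            tracker.RequestedTx(peer, req.GetHash(), now + std::chrono::seconds{60});
        }

        // The tx (or a NOTFOUND) arrives: the request to this peer is completed...
        tracker.ReceivedResponse(peer, wtxid);
        // ...and once the tx is accepted, confirmed, or rejected, drop all tracking for it.
        tracker.ForgetTxHash(wtxid);
        tracker.DisconnectedPeer(peer);    // on disconnect, remove the peer's entries
    }
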
diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h
index 77cb1781a4..00544f64fe 100644
--- a/src/primitives/transaction.h
+++ b/src/primitives/transaction.h
@@ -399,8 +399,8 @@ template <typename Tx> static inline CTransactionRef MakeTransactionRef(Tx&& txI
/** A generic txid reference (txid or wtxid). */
class GenTxid
{
- const bool m_is_wtxid;
- const uint256 m_hash;
+ bool m_is_wtxid;
+ uint256 m_hash;
public:
GenTxid(bool is_wtxid, const uint256& hash) : m_is_wtxid(is_wtxid), m_hash(hash) {}
bool IsWtxid() const { return m_is_wtxid; }
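
Dropping the const qualifiers makes GenTxid copy- and move-assignable; with const members the implicit assignment operators are deleted, which would prevent storing GenTxid in containers that move their elements around, as the new request-tracking code does. A minimal illustration of the motivation (my reading of the change, hypothetical code):

    #include <primitives/transaction.h>
    #include <uint256.h>

    #include <vector>

    void GenTxidAssignSketch()
    {
        GenTxid a(/* is_wtxid= */ false, uint256{});
        GenTxid b(/* is_wtxid= */ true, uint256{});
        a = b;                         // ill-formed while the members were const
        std::vector<GenTxid> v{a, b};
        v.erase(v.begin());            // shifting elements assigns; requires operator=
    }
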
diff --git a/src/secp256k1/.travis.yml b/src/secp256k1/.travis.yml
index e1a88c4051..bcc8c210f5 100644
--- a/src/secp256k1/.travis.yml
+++ b/src/secp256k1/.travis.yml
@@ -17,33 +17,29 @@ compiler:
- gcc
env:
global:
- - WIDEMUL=auto BIGNUM=auto ENDOMORPHISM=no STATICPRECOMPUTATION=yes ECMULTGENPRECISION=auto ASM=no BUILD=check EXTRAFLAGS= HOST= ECDH=no RECOVERY=no SCHNORRSIG=no EXPERIMENTAL=no CTIMETEST=yes BENCH=yes ITERS=2
+ - WIDEMUL=auto BIGNUM=auto STATICPRECOMPUTATION=yes ECMULTGENPRECISION=auto ASM=no BUILD=check WITH_VALGRIND=yes RUN_VALGRIND=no EXTRAFLAGS= HOST= ECDH=no RECOVERY=no SCHNORRSIG=no EXPERIMENTAL=no CTIMETEST=yes BENCH=yes ITERS=2
matrix:
- WIDEMUL=int64 RECOVERY=yes
- WIDEMUL=int64 ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- - WIDEMUL=int64 ENDOMORPHISM=yes
- WIDEMUL=int128
- WIDEMUL=int128 RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- - WIDEMUL=int128 ENDOMORPHISM=yes
- - WIDEMUL=int128 ENDOMORPHISM=yes ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes
+ - WIDEMUL=int128 ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- WIDEMUL=int128 ASM=x86_64
- - WIDEMUL=int128 ENDOMORPHISM=yes ASM=x86_64
- BIGNUM=no
- - BIGNUM=no ENDOMORPHISM=yes RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes
+ - BIGNUM=no RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- BIGNUM=no STATICPRECOMPUTATION=no
- - BUILD=distcheck CTIMETEST= BENCH=
+ - BUILD=distcheck WITH_VALGRIND=no CTIMETEST=no BENCH=no
- CPPFLAGS=-DDETERMINISTIC
- - CFLAGS=-O0 CTIMETEST=
+ - CFLAGS=-O0 CTIMETEST=no
- ECMULTGENPRECISION=2
- ECMULTGENPRECISION=8
- - VALGRIND=yes ENDOMORPHISM=yes BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes EXTRAFLAGS="--disable-openssl-tests" CPPFLAGS=-DVALGRIND BUILD=
- - VALGRIND=yes BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes EXTRAFLAGS="--disable-openssl-tests" CPPFLAGS=-DVALGRIND BUILD=
+ - RUN_VALGRIND=yes BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes EXTRAFLAGS="--disable-openssl-tests" BUILD=
matrix:
fast_finish: true
include:
- compiler: clang
os: linux
- env: HOST=i686-linux-gnu ENDOMORPHISM=yes
+ env: HOST=i686-linux-gnu
addons:
apt:
packages:
@@ -63,7 +59,7 @@ matrix:
- libtool-bin
- libc6-dbg:i386
- compiler: gcc
- env: HOST=i686-linux-gnu ENDOMORPHISM=yes
+ env: HOST=i686-linux-gnu
os: linux
addons:
apt:
diff --git a/src/secp256k1/README.md b/src/secp256k1/README.md
index 434178b372..2602475787 100644
--- a/src/secp256k1/README.md
+++ b/src/secp256k1/README.md
@@ -48,7 +48,7 @@ Implementation details
* Use wNAF notation for point multiplicands.
* Use a much larger window for multiples of G, using precomputed multiples.
* Use Shamir's trick to do the multiplication with the public key and the generator simultaneously.
- * Optionally (off by default) use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
+ * Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
* Point multiplication for signing
* Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions.
* Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains)
diff --git a/src/secp256k1/configure.ac b/src/secp256k1/configure.ac
index 6fe8984f4d..5a078e6c81 100644
--- a/src/secp256k1/configure.ac
+++ b/src/secp256k1/configure.ac
@@ -67,7 +67,7 @@ esac
CFLAGS="-W $CFLAGS"
-warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wno-unused-function -Wno-long-long -Wno-overlength-strings"
+warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef -Wno-unused-function -Wno-long-long -Wno-overlength-strings"
saved_CFLAGS="$CFLAGS"
CFLAGS="$warn_CFLAGS $CFLAGS"
AC_MSG_CHECKING([if ${CC} supports ${warn_CFLAGS}])
@@ -116,11 +116,6 @@ AC_ARG_ENABLE(exhaustive_tests,
[use_exhaustive_tests=$enableval],
[use_exhaustive_tests=yes])
-AC_ARG_ENABLE(endomorphism,
- AS_HELP_STRING([--enable-endomorphism],[enable endomorphism [default=no]]),
- [use_endomorphism=$enableval],
- [use_endomorphism=no])
-
AC_ARG_ENABLE(ecmult_static_precomputation,
AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing [default=auto]]),
[use_ecmult_static_precomputation=$enableval],
@@ -164,8 +159,7 @@ AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto],
AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto],
[window size for ecmult precomputation for verification, specified as integer in range [2..24].]
[Larger values result in possibly better performance at the cost of an exponentially larger precomputed table.]
-[The table will store 2^(SIZE-2) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.]
-[If the endomorphism optimization is enabled, two tables of this size are used instead of only one.]
+[The table will store 2^(SIZE-1) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.]
["auto" is a reasonable setting for desktop machines (currently 15). [default=auto]]
)],
[req_ecmult_window=$withval], [req_ecmult_window=auto])
@@ -178,7 +172,21 @@ AC_ARG_WITH([ecmult-gen-precision], [AS_HELP_STRING([--with-ecmult-gen-precision
)],
[req_ecmult_gen_precision=$withval], [req_ecmult_gen_precision=auto])
-AC_CHECK_HEADER([valgrind/memcheck.h], [enable_valgrind=yes], [enable_valgrind=no], [])
+AC_ARG_WITH([valgrind], [AS_HELP_STRING([--with-valgrind=yes|no|auto],
+[Build with extra checks for running inside Valgrind [default=auto]]
+)],
+[req_valgrind=$withval], [req_valgrind=auto])
+
+if test x"$req_valgrind" = x"no"; then
+ enable_valgrind=no
+else
+ AC_CHECK_HEADER([valgrind/memcheck.h], [enable_valgrind=yes], [
+ if test x"$req_valgrind" = x"yes"; then
+ AC_MSG_ERROR([Valgrind support explicitly requested but valgrind/memcheck.h header not available])
+ fi
+ enable_valgrind=no
+ ], [])
+fi
AM_CONDITIONAL([VALGRIND_ENABLED],[test "$enable_valgrind" = "yes"])
if test x"$enable_coverage" = x"yes"; then
@@ -415,10 +423,6 @@ if test x"$set_bignum" = x"gmp"; then
SECP_INCLUDES="$SECP_INCLUDES $GMP_CPPFLAGS"
fi
-if test x"$use_endomorphism" = x"yes"; then
- AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization])
-fi
-
if test x"$set_precomp" = x"yes"; then
AC_DEFINE(USE_ECMULT_STATIC_PRECOMPUTATION, 1, [Define this symbol to use a statically generated ecmult table])
fi
@@ -500,7 +504,6 @@ AC_OUTPUT
echo
echo "Build Options:"
-echo " with endomorphism = $use_endomorphism"
echo " with ecmult precomp = $set_precomp"
echo " with external callbacks = $use_external_default_callbacks"
echo " with benchmarks = $use_benchmark"
diff --git a/src/secp256k1/contrib/travis.sh b/src/secp256k1/contrib/travis.sh
index b0b55b44b8..24cc9315cb 100755
--- a/src/secp256k1/contrib/travis.sh
+++ b/src/secp256k1/contrib/travis.sh
@@ -13,27 +13,28 @@ then
fi
./configure \
- --enable-experimental="$EXPERIMENTAL" --enable-endomorphism="$ENDOMORPHISM" \
+ --enable-experimental="$EXPERIMENTAL" \
--with-test-override-wide-multiply="$WIDEMUL" --with-bignum="$BIGNUM" --with-asm="$ASM" \
--enable-ecmult-static-precomputation="$STATICPRECOMPUTATION" --with-ecmult-gen-precision="$ECMULTGENPRECISION" \
--enable-module-ecdh="$ECDH" --enable-module-recovery="$RECOVERY" \
--enable-module-schnorrsig="$SCHNORRSIG" \
+ --with-valgrind="$WITH_VALGRIND" \
--host="$HOST" $EXTRAFLAGS
if [ -n "$BUILD" ]
then
make -j2 "$BUILD"
fi
-if [ -n "$VALGRIND" ]
+if [ "$RUN_VALGRIND" = "yes" ]
then
make -j2
# the `--error-exitcode` is required to make the test fail if valgrind found errors, otherwise it'll return 0 (http://valgrind.org/docs/manual/manual-core.html)
valgrind --error-exitcode=42 ./tests 16
valgrind --error-exitcode=42 ./exhaustive_tests
fi
-if [ -n "$BENCH" ]
+if [ "$BENCH" = "yes" ]
then
- if [ -n "$VALGRIND" ]
+ if [ "$RUN_VALGRIND" = "yes" ]
then
# Using the local `libtool` because on macOS the system's libtool has nothing to do with GNU libtool
EXEC='./libtool --mode=execute valgrind --error-exitcode=42'
@@ -56,8 +57,12 @@ then
then
$EXEC ./bench_ecdh >> bench.log 2>&1
fi
+ if [ "$SCHNORRSIG" = "yes" ]
+ then
+ $EXEC ./bench_schnorrsig >> bench.log 2>&1
+ fi
fi
-if [ -n "$CTIMETEST" ]
+if [ "$CTIMETEST" = "yes" ]
then
./libtool --mode=execute valgrind --error-exitcode=42 ./valgrind_ctime_test > valgrind_ctime_test.log 2>&1
fi
diff --git a/src/secp256k1/sage/gen_exhaustive_groups.sage b/src/secp256k1/sage/gen_exhaustive_groups.sage
new file mode 100644
index 0000000000..3c3c984811
--- /dev/null
+++ b/src/secp256k1/sage/gen_exhaustive_groups.sage
@@ -0,0 +1,129 @@
+# Define field size and field
+P = 2^256 - 2^32 - 977
+F = GF(P)
+BETA = F(0x7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee)
+
+assert(BETA != F(1) and BETA^3 == F(1))
+
+orders_done = set()
+results = {}
+first = True
+for b in range(1, P):
+ # There are only 6 curves (up to isomorphism) of the form y^2=x^3+B. Stop once we have tried all.
+ if len(orders_done) == 6:
+ break
+
+ E = EllipticCurve(F, [0, b])
+ print("Analyzing curve y^2 = x^3 + %i" % b)
+ n = E.order()
+ # Skip curves with an order we've already tried
+ if n in orders_done:
+ print("- Isomorphic to earlier curve")
+ continue
+ orders_done.add(n)
+ # Skip curves isomorphic to the real secp256k1
+ if n.is_pseudoprime():
+ print(" - Isomorphic to secp256k1")
+ continue
+
+ print("- Finding subgroups")
+
+ # Find what prime subgroups exist
+ for f, _ in n.factor():
+ print("- Analyzing subgroup of order %i" % f)
+ # Skip subgroups of order >1000
+ if f < 4 or f > 1000:
+ print(" - Bad size")
+ continue
+
+ # Iterate over X coordinates until we find one that is on the curve, has order f,
+ # and for which a curve isomorphism exists that maps it to X coordinate 1.
+ for x in range(1, P):
+ # Skip X coordinates not on the curve, and construct the full point otherwise.
+ if not E.is_x_coord(x):
+ continue
+ G = E.lift_x(F(x))
+
+ print(" - Analyzing (multiples of) point with X=%i" % x)
+
+ # Skip points whose order is not a multiple of f. Project the point to have
+ # order f otherwise.
+ if (G.order() % f):
+ print(" - Bad order")
+ continue
+ G = G * (G.order() // f)
+
+ # Find lambda for endomorphism. Skip if none can be found.
+ lam = None
+ for l in Integers(f)(1).nth_root(3, all=True):
+ if int(l)*G == E(BETA*G[0], G[1]):
+ lam = int(l)
+ break
+ if lam is None:
+ print(" - No endomorphism for this subgroup")
+ break
+
+ # Now look for an isomorphism of the curve that gives this point an X
+ # coordinate equal to 1.
+ # If (x,y) is on y^2 = x^3 + b, then (a^2*x, a^3*y) is on y^2 = x^3 + a^6*b.
+ # So look for m=a^2=1/x.
+ m = F(1)/G[0]
+ if not m.is_square():
+ print(" - No curve isomorphism maps it to a point with X=1")
+ continue
+ a = m.sqrt()
+ rb = a^6*b
+ RE = EllipticCurve(F, [0, rb])
+
+ # Use as generator twice the image of G under the above isomorphism.
+ # This means that generator*(1/2 mod f) will have X coordinate 1.
+ RG = RE(1, a^3*G[1]) * 2
+ # And even Y coordinate.
+ if int(RG[1]) % 2:
+ RG = -RG
+ assert(RG.order() == f)
+ assert(lam*RG == RE(BETA*RG[0], RG[1]))
+
+ # We have found curve RE:y^2=x^3+rb with generator RG of order f. Remember it
+ results[f] = {"b": rb, "G": RG, "lambda": lam}
+ print(" - Found solution")
+ break
+
+ print("")
+
+print("")
+print("")
+print("/* To be put in src/group_impl.h: */")
+first = True
+for f in sorted(results.keys()):
+ b = results[f]["b"]
+ G = results[f]["G"]
+ print("# %s EXHAUSTIVE_TEST_ORDER == %i" % ("if" if first else "elif", f))
+ first = False
+ print("static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(")
+ print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
+ print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
+ print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
+ print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
+ print(");")
+ print("static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(")
+ print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
+ print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
+ print(");")
+print("# else")
+print("# error No known generator for the specified exhaustive test group order.")
+print("# endif")
+
+print("")
+print("")
+print("/* To be put in src/scalar_impl.h: */")
+first = True
+for f in sorted(results.keys()):
+ lam = results[f]["lambda"]
+ print("# %s EXHAUSTIVE_TEST_ORDER == %i" % ("if" if first else "elif", f))
+ first = False
+ print("# define EXHAUSTIVE_TEST_LAMBDA %i" % lam)
+print("# else")
+print("# error No known lambda for the specified exhaustive test group order.")
+print("# endif")
+print("")
diff --git a/src/secp256k1/src/assumptions.h b/src/secp256k1/src/assumptions.h
index f9d4e8e793..77204de2b8 100644
--- a/src/secp256k1/src/assumptions.h
+++ b/src/secp256k1/src/assumptions.h
@@ -7,6 +7,8 @@
#ifndef SECP256K1_ASSUMPTIONS_H
#define SECP256K1_ASSUMPTIONS_H
+#include <limits.h>
+
#include "util.h"
/* This library, like most software, relies on a number of compiler implementation defined (but not undefined)
@@ -19,7 +21,11 @@ struct secp256k1_assumption_checker {
allowed. */
int dummy_array[(
/* Bytes are 8 bits. */
- CHAR_BIT == 8 &&
+ (CHAR_BIT == 8) &&
+
+ /* No integer promotion for uint32_t. This ensures that we can multiply uintXX_t values where XX >= 32
+ without signed overflow, which would be undefined behaviour. */
+ (UINT_MAX <= UINT32_MAX) &&
/* Conversions from unsigned to signed outside of the bounds of the signed type are
implementation-defined. Verify that they function as reinterpreting the lower
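
The new UINT_MAX <= UINT32_MAX assumption rules out platforms where int is wider than 32 bits: there, uint32_t operands would promote to signed int, so a full 32x32-bit multiply could overflow the signed intermediate, which is undefined behaviour. A sketch of the hazard being excluded (hypothetical platform, not code from the library):

    #include <cstdint>

    uint32_t Mul32Sketch(uint32_t a, uint32_t b)
    {
        // If int were 64 bits wide, a and b would promote to int, and
        // 0xffffffff * 0xffffffff would overflow the signed intermediate
        // (undefined behaviour). With int at most 32 bits, as assumptions.h
        // now verifies, the operands stay unsigned and the product wraps
        // modulo 2^32 as intended.
        return a * b;
    }
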
diff --git a/src/secp256k1/src/basic-config.h b/src/secp256k1/src/basic-config.h
index 83dbe6f25b..b0d82e89b4 100644
--- a/src/secp256k1/src/basic-config.h
+++ b/src/secp256k1/src/basic-config.h
@@ -11,7 +11,6 @@
#undef USE_ASM_X86_64
#undef USE_ECMULT_STATIC_PRECOMPUTATION
-#undef USE_ENDOMORPHISM
#undef USE_EXTERNAL_ASM
#undef USE_EXTERNAL_DEFAULT_CALLBACKS
#undef USE_FIELD_INV_BUILTIN
diff --git a/src/secp256k1/src/bench_internal.c b/src/secp256k1/src/bench_internal.c
index 9687fe4482..5f2b7a9759 100644
--- a/src/secp256k1/src/bench_internal.c
+++ b/src/secp256k1/src/bench_internal.c
@@ -117,7 +117,6 @@ void bench_scalar_mul(void* arg, int iters) {
}
}
-#ifdef USE_ENDOMORPHISM
void bench_scalar_split(void* arg, int iters) {
int i, j = 0;
bench_inv *data = (bench_inv*)arg;
@@ -128,7 +127,6 @@ void bench_scalar_split(void* arg, int iters) {
}
CHECK(j <= iters);
}
-#endif
void bench_scalar_inverse(void* arg, int iters) {
int i, j = 0;
@@ -397,9 +395,7 @@ int main(int argc, char **argv) {
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, iters*100);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, bench_setup, NULL, &data, 10, iters*10);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, iters*10);
-#ifdef USE_ENDOMORPHISM
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, iters);
-#endif
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000);
diff --git a/src/secp256k1/src/ecmult.h b/src/secp256k1/src/ecmult.h
index c9b198239d..09e8146414 100644
--- a/src/secp256k1/src/ecmult.h
+++ b/src/secp256k1/src/ecmult.h
@@ -15,9 +15,7 @@
typedef struct {
/* For accelerating the computation of a*P + b*G: */
secp256k1_ge_storage (*pre_g)[]; /* odd multiples of the generator */
-#ifdef USE_ENDOMORPHISM
secp256k1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */
-#endif
} secp256k1_ecmult_context;
static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
diff --git a/src/secp256k1/src/ecmult_const_impl.h b/src/secp256k1/src/ecmult_const_impl.h
index 55b61e4937..bb9511108b 100644
--- a/src/secp256k1/src/ecmult_const_impl.h
+++ b/src/secp256k1/src/ecmult_const_impl.h
@@ -140,19 +140,16 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
secp256k1_fe Z;
int skew_1;
-#ifdef USE_ENDOMORPHISM
secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
int skew_lam;
secp256k1_scalar q_1, q_lam;
-#endif
int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
int i;
/* build wnaf representation for q. */
int rsize = size;
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
rsize = 128;
/* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
@@ -160,12 +157,9 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
} else
-#endif
{
skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
-#ifdef USE_ENDOMORPHISM
skew_lam = 0;
-#endif
}
/* Calculate odd multiples of a.
@@ -179,14 +173,12 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_fe_normalize_weak(&pre_a[i].y);
}
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
}
}
-#endif
/* first loop iteration (separated out so we can directly set r, rather
* than having it start at infinity, get doubled several times, then have
@@ -195,14 +187,12 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
secp256k1_gej_set_ge(r, &tmpa);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
secp256k1_gej_add_ge(r, r, &tmpa);
}
-#endif
/* remaining loop iterations */
for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) {
int n;
@@ -215,14 +205,12 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
VERIFY_CHECK(n != 0);
secp256k1_gej_add_ge(r, r, &tmpa);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
n = wnaf_lam[i];
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
VERIFY_CHECK(n != 0);
secp256k1_gej_add_ge(r, r, &tmpa);
}
-#endif
}
secp256k1_fe_mul(&r->z, &r->z, &Z);
@@ -231,43 +219,35 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
/* Correct for wNAF skew */
secp256k1_ge correction = *a;
secp256k1_ge_storage correction_1_stor;
-#ifdef USE_ENDOMORPHISM
secp256k1_ge_storage correction_lam_stor;
-#endif
secp256k1_ge_storage a2_stor;
secp256k1_gej tmpj;
secp256k1_gej_set_ge(&tmpj, &correction);
secp256k1_gej_double_var(&tmpj, &tmpj, NULL);
secp256k1_ge_set_gej(&correction, &tmpj);
secp256k1_ge_to_storage(&correction_1_stor, a);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_to_storage(&correction_lam_stor, a);
}
-#endif
secp256k1_ge_to_storage(&a2_stor, &correction);
/* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
}
-#endif
/* Apply the correction */
secp256k1_ge_from_storage(&correction, &correction_1_stor);
secp256k1_ge_neg(&correction, &correction);
secp256k1_gej_add_ge(r, r, &correction);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_from_storage(&correction, &correction_lam_stor);
secp256k1_ge_neg(&correction, &correction);
secp256k1_ge_mul_lambda(&correction, &correction);
secp256k1_gej_add_ge(r, r, &correction);
}
-#endif
}
}
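
With the endomorphism code now unconditional, every constant-time multiply takes the GLV path seen above. The underlying facts (standard secp256k1 properties, not text from this diff): there are constants $\lambda$ modulo the group order $n$ and $\beta$ modulo the field prime $p$, each a nontrivial cube root of unity, such that

    $$\lambda \cdot (x, y) = (\beta x,\ y).$$

A 256-bit scalar $q$ is then split as $q = q_1 + q_{lam} \cdot \lambda \bmod n$ with $q_1, q_{lam}$ roughly 128 bits (secp256k1_scalar_split_lambda), replacing one 256-bit ladder by two half-length ones that share their doublings.
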
diff --git a/src/secp256k1/src/ecmult_impl.h b/src/secp256k1/src/ecmult_impl.h
index f03fa9469d..057a69cf73 100644
--- a/src/secp256k1/src/ecmult_impl.h
+++ b/src/secp256k1/src/ecmult_impl.h
@@ -38,8 +38,8 @@
* (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage) bytes,
* where sizeof(secp256k1_ge_storage) is typically 64 bytes but can
* be larger due to platform-specific padding and alignment.
- * If the endomorphism optimization is enabled (USE_ENDOMORMPHSIM)
- * two tables of this size are used instead of only one.
+ * Two tables of this size are used (due to the endomorphism
+ * optimization).
*/
# define WINDOW_G ECMULT_WINDOW_SIZE
#endif
@@ -59,11 +59,7 @@
# error Set ECMULT_WINDOW_SIZE to an integer in range [2..24].
#endif
-#ifdef USE_ENDOMORPHISM
- #define WNAF_BITS 128
-#else
- #define WNAF_BITS 256
-#endif
+#define WNAF_BITS 128
#define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w))
#define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)
@@ -77,17 +73,9 @@
#define PIPPENGER_MAX_BUCKET_WINDOW 12
/* Minimum number of points for which pippenger_wnaf is faster than strauss wnaf */
-#ifdef USE_ENDOMORPHISM
- #define ECMULT_PIPPENGER_THRESHOLD 88
-#else
- #define ECMULT_PIPPENGER_THRESHOLD 160
-#endif
+#define ECMULT_PIPPENGER_THRESHOLD 88
-#ifdef USE_ENDOMORPHISM
- #define ECMULT_MAX_POINTS_PER_BATCH 5000000
-#else
- #define ECMULT_MAX_POINTS_PER_BATCH 10000000
-#endif
+#define ECMULT_MAX_POINTS_PER_BATCH 5000000
/** Fill a table 'prej' with precomputed odd multiples of a. Prej will contain
* the values [1*a,3*a,...,(2*n-1)*a], so it needs space for n values. zr[0] will
@@ -313,16 +301,12 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp25
static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE =
ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
-#ifdef USE_ENDOMORPHISM
+ ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
-#endif
;
static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) {
ctx->pre_g = NULL;
-#ifdef USE_ENDOMORPHISM
ctx->pre_g_128 = NULL;
-#endif
}
static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc) {
@@ -347,7 +331,6 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void *
/* precompute the tables with odd multiples */
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj);
-#ifdef USE_ENDOMORPHISM
{
secp256k1_gej g_128j;
int i;
@@ -364,7 +347,6 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void *
}
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j);
}
-#endif
}
static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src) {
@@ -372,11 +354,9 @@ static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *d
/* We cast to void* first to suppress a -Wcast-align warning. */
dst->pre_g = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src));
}
-#ifdef USE_ENDOMORPHISM
if (src->pre_g_128 != NULL) {
dst->pre_g_128 = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src));
}
-#endif
}
static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx) {
@@ -447,16 +427,11 @@ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a,
}
struct secp256k1_strauss_point_state {
-#ifdef USE_ENDOMORPHISM
secp256k1_scalar na_1, na_lam;
- int wnaf_na_1[130];
- int wnaf_na_lam[130];
+ int wnaf_na_1[129];
+ int wnaf_na_lam[129];
int bits_na_1;
int bits_na_lam;
-#else
- int wnaf_na[256];
- int bits_na;
-#endif
size_t input_pos;
};
@@ -464,26 +439,19 @@ struct secp256k1_strauss_state {
secp256k1_gej* prej;
secp256k1_fe* zr;
secp256k1_ge* pre_a;
-#ifdef USE_ENDOMORPHISM
secp256k1_ge* pre_a_lam;
-#endif
struct secp256k1_strauss_point_state* ps;
};
static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, const struct secp256k1_strauss_state *state, secp256k1_gej *r, int num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
secp256k1_ge tmpa;
secp256k1_fe Z;
-#ifdef USE_ENDOMORPHISM
/* Split G factors. */
secp256k1_scalar ng_1, ng_128;
int wnaf_ng_1[129];
int bits_ng_1 = 0;
int wnaf_ng_128[129];
int bits_ng_128 = 0;
-#else
- int wnaf_ng[256];
- int bits_ng = 0;
-#endif
int i;
int bits = 0;
int np;
@@ -494,28 +462,20 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
continue;
}
state->ps[no].input_pos = np;
-#ifdef USE_ENDOMORPHISM
/* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
secp256k1_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]);
/* build wnaf representation for na_1 and na_lam. */
- state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 130, &state->ps[no].na_1, WINDOW_A);
- state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 130, &state->ps[no].na_lam, WINDOW_A);
- VERIFY_CHECK(state->ps[no].bits_na_1 <= 130);
- VERIFY_CHECK(state->ps[no].bits_na_lam <= 130);
+ state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &state->ps[no].na_1, WINDOW_A);
+ state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &state->ps[no].na_lam, WINDOW_A);
+ VERIFY_CHECK(state->ps[no].bits_na_1 <= 129);
+ VERIFY_CHECK(state->ps[no].bits_na_lam <= 129);
if (state->ps[no].bits_na_1 > bits) {
bits = state->ps[no].bits_na_1;
}
if (state->ps[no].bits_na_lam > bits) {
bits = state->ps[no].bits_na_lam;
}
-#else
- /* build wnaf representation for na. */
- state->ps[no].bits_na = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na, 256, &na[np], WINDOW_A);
- if (state->ps[no].bits_na > bits) {
- bits = state->ps[no].bits_na;
- }
-#endif
++no;
}
@@ -547,7 +507,6 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
secp256k1_fe_set_int(&Z, 1);
}
-#ifdef USE_ENDOMORPHISM
for (np = 0; np < no; ++np) {
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]);
@@ -568,21 +527,12 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
bits = bits_ng_128;
}
}
-#else
- if (ng) {
- bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G);
- if (bits_ng > bits) {
- bits = bits_ng;
- }
- }
-#endif
secp256k1_gej_set_infinity(r);
for (i = bits - 1; i >= 0; i--) {
int n;
secp256k1_gej_double_var(r, r, NULL);
-#ifdef USE_ENDOMORPHISM
for (np = 0; np < no; ++np) {
if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) {
ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
@@ -601,18 +551,6 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G);
secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
}
-#else
- for (np = 0; np < no; ++np) {
- if (i < state->ps[np].bits_na && (n = state->ps[np].wnaf_na[i])) {
- ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
- secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
- }
- }
- if (i < bits_ng && (n = wnaf_ng[i])) {
- ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G);
- secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
- }
-#endif
}
if (!r->infinity) {
@@ -625,27 +563,19 @@ static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej
secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];
secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
struct secp256k1_strauss_point_state ps[1];
-#ifdef USE_ENDOMORPHISM
secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
-#endif
struct secp256k1_strauss_state state;
state.prej = prej;
state.zr = zr;
state.pre_a = pre_a;
-#ifdef USE_ENDOMORPHISM
state.pre_a_lam = pre_a_lam;
-#endif
state.ps = ps;
secp256k1_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng);
}
static size_t secp256k1_strauss_scratch_size(size_t n_points) {
-#ifdef USE_ENDOMORPHISM
static const size_t point_size = (2 * sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
-#else
- static const size_t point_size = (sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
-#endif
return n_points*point_size;
}
@@ -665,12 +595,8 @@ static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callba
scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar));
state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
-#ifdef USE_ENDOMORPHISM
state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A);
-#else
- state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
-#endif
state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL) {
@@ -868,7 +794,6 @@ static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_wi
* set of buckets) for a given number of points.
*/
static int secp256k1_pippenger_bucket_window(size_t n) {
-#ifdef USE_ENDOMORPHISM
if (n <= 1) {
return 1;
} else if (n <= 4) {
@@ -892,33 +817,6 @@ static int secp256k1_pippenger_bucket_window(size_t n) {
} else {
return PIPPENGER_MAX_BUCKET_WINDOW;
}
-#else
- if (n <= 1) {
- return 1;
- } else if (n <= 11) {
- return 2;
- } else if (n <= 45) {
- return 3;
- } else if (n <= 100) {
- return 4;
- } else if (n <= 275) {
- return 5;
- } else if (n <= 625) {
- return 6;
- } else if (n <= 1850) {
- return 7;
- } else if (n <= 3400) {
- return 8;
- } else if (n <= 9630) {
- return 9;
- } else if (n <= 17900) {
- return 10;
- } else if (n <= 32800) {
- return 11;
- } else {
- return PIPPENGER_MAX_BUCKET_WINDOW;
- }
-#endif
}
/**
@@ -926,7 +824,6 @@ static int secp256k1_pippenger_bucket_window(size_t n) {
*/
static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
switch(bucket_window) {
-#ifdef USE_ENDOMORPHISM
case 1: return 1;
case 2: return 4;
case 3: return 20;
@@ -939,26 +836,11 @@ static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
case 10: return 7880;
case 11: return 16050;
case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
-#else
- case 1: return 1;
- case 2: return 11;
- case 3: return 45;
- case 4: return 100;
- case 5: return 275;
- case 6: return 625;
- case 7: return 1850;
- case 8: return 3400;
- case 9: return 9630;
- case 10: return 17900;
- case 11: return 32800;
- case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
-#endif
}
return 0;
}
-#ifdef USE_ENDOMORPHISM
SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2) {
secp256k1_scalar tmp = *s1;
secp256k1_scalar_split_lambda(s1, s2, &tmp);
@@ -973,32 +855,23 @@ SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, s
secp256k1_ge_neg(p2, p2);
}
}
-#endif
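
The split that secp256k1_ecmult_endo_split performs rests on the GLV identity s*P == s1*P + s2*(lambda*P) whenever s == s1 + lambda*s2 (mod n). A worked instance in the order-13 exhaustive group (a sketch; lambda = 3 is one valid choice there, since 3^3 = 27 == 1 mod 13):

#include <assert.h>

int main(void) {
    const int order = 13, lambda = 3;
    int s = 11, s1 = 2, s2 = 3;              /* 2 + 3*3 == 11 */
    assert((s1 + lambda * s2) % order == s % order);
    /* On the curve: s*P == s1*P + s2*(lambda*P), where lambda*(x,y) ==
     * (beta*x, y) costs a single field multiplication. Both halves are
     * ~half the bit length of s in the real 256-bit group. */
    return 0;
}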
/**
* Returns the scratch size required for a given number of points (excluding
* base point G) without considering alignment.
*/
static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) {
-#ifdef USE_ENDOMORPHISM
size_t entries = 2*n_points + 2;
-#else
- size_t entries = n_points + 1;
-#endif
size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
}
static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
- /* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch
+ /* Use 2(n+1) with the endomorphism when calculating batch
* sizes. The reason for +1 is that we add the G scalar to the list of
* other scalars. */
-#ifdef USE_ENDOMORPHISM
size_t entries = 2*n_points + 2;
-#else
- size_t entries = n_points + 1;
-#endif
secp256k1_ge *points;
secp256k1_scalar *scalars;
secp256k1_gej *buckets;
@@ -1035,10 +908,8 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_call
scalars[0] = *inp_g_sc;
points[0] = secp256k1_ge_const_g;
idx++;
-#ifdef USE_ENDOMORPHISM
secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
idx++;
-#endif
}
while (point_idx < n_points) {
@@ -1047,10 +918,8 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_call
return 0;
}
idx++;
-#ifdef USE_ENDOMORPHISM
secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
idx++;
-#endif
point_idx++;
}
@@ -1093,9 +962,7 @@ static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_cal
size_t space_overhead;
size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
-#ifdef USE_ENDOMORPHISM
entry_size = 2*entry_size;
-#endif
space_overhead = (sizeof(secp256k1_gej) << bucket_window) + entry_size + sizeof(struct secp256k1_pippenger_state);
if (space_overhead > max_alloc) {
break;
diff --git a/src/secp256k1/src/group.h b/src/secp256k1/src/group.h
index 6185be052d..36e39ecf0f 100644
--- a/src/secp256k1/src/group.h
+++ b/src/secp256k1/src/group.h
@@ -59,6 +59,7 @@ static int secp256k1_ge_is_infinity(const secp256k1_ge *a);
/** Check whether a group element is valid (i.e., on the curve). */
static int secp256k1_ge_is_valid_var(const secp256k1_ge *a);
+/** Set r equal to the inverse of a (i.e., mirrored around the X axis) */
static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a);
/** Set a group element equal to another which is given in jacobian coordinates */
@@ -115,10 +116,8 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
/** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */
static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv);
-#ifdef USE_ENDOMORPHISM
/** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a);
-#endif
/** Clear a secp256k1_gej to prevent leaking sensitive information. */
static void secp256k1_gej_clear(secp256k1_gej *r);
@@ -138,4 +137,15 @@ static void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_g
/** Rescale a jacobian point by b which must be non-zero. Constant-time. */
static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *b);
+/** Determine if a point (which is assumed to be on the curve) is in the correct (sub)group of the curve.
+ *
+ * In normal mode, the group used is secp256k1, which has cofactor=1, meaning that every point on the curve is in the
+ * group, and this function always returns true.
+ *
+ * When compiling in exhaustive test mode, a slightly different curve equation is used, leading to a group with a
+ * (very) small subgroup, and that subgroup is what is used for all cryptographic operations. In that mode, this
+ * function checks whether a point that is on the curve is in fact also in that subgroup.
+ */
+static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge);
+
#endif /* SECP256K1_GROUP_H */
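
A brief usage sketch of the new predicate (internal API; this fragment mirrors the extrakeys change further down in this diff):

    /* Reject an x-only point that is on the curve but outside the subgroup;
     * on real secp256k1 (cofactor 1) this branch can never be taken. */
    if (!secp256k1_ge_is_in_correct_subgroup(&pk)) {
        return 0;
    }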
diff --git a/src/secp256k1/src/group_impl.h b/src/secp256k1/src/group_impl.h
index ccd93d3483..a5fbc91a0f 100644
--- a/src/secp256k1/src/group_impl.h
+++ b/src/secp256k1/src/group_impl.h
@@ -11,49 +11,38 @@
#include "field.h"
#include "group.h"
-/* These points can be generated in sage as follows:
+/* These exhaustive group test orders and generators are chosen such that:
+ * - The field size is equal to that of secp256k1, so field code is the same.
+ * - The curve equation is of the form y^2=x^3+B for some constant B.
+ * - The subgroup has a generator 2*P, where P.x=1.
+ * - The subgroup has size less than 1000 to permit exhaustive testing.
+ * - The subgroup admits an endomorphism of the form lambda*(x,y) == (beta*x,y).
*
- * 0. Setup a worksheet with the following parameters.
- * b = 4 # whatever CURVE_B will be set to
- * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
- * C = EllipticCurve ([F (0), F (b)])
- *
- * 1. Determine all the small orders available to you. (If there are
- * no satisfactory ones, go back and change b.)
- * print C.order().factor(limit=1000)
- *
- * 2. Choose an order as one of the prime factors listed in the above step.
- * (You can also multiply some to get a composite order, though the
- * tests will crash trying to invert scalars during signing.) We take a
- * random point and scale it to drop its order to the desired value.
- * There is some probability this won't work; just try again.
- * order = 199
- * P = C.random_point()
- * P = (int(P.order()) / int(order)) * P
- * assert(P.order() == order)
- *
- * 3. Print the values. You'll need to use a vim macro or something to
- * split the hex output into 4-byte chunks.
- * print "%x %x" % P.xy()
+ * These parameters are generated using sage/gen_exhaustive_groups.sage.
*/
#if defined(EXHAUSTIVE_TEST_ORDER)
-# if EXHAUSTIVE_TEST_ORDER == 199
+# if EXHAUSTIVE_TEST_ORDER == 13
static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
- 0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069,
- 0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18,
- 0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868,
- 0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED
+ 0xc3459c3d, 0x35326167, 0xcd86cce8, 0x07a2417f,
+ 0x5b8bd567, 0xde8538ee, 0x0d507b0c, 0xd128f5bb,
+ 0x8e467fec, 0xcd30000a, 0x6cc1184e, 0x25d382c2,
+ 0xa2f4494e, 0x2fbe9abc, 0x8b64abac, 0xd005fb24
);
-
-static const int CURVE_B = 4;
-# elif EXHAUSTIVE_TEST_ORDER == 13
+static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(
+ 0x3d3486b2, 0x159a9ca5, 0xc75638be, 0xb23a69bc,
+ 0x946a45ab, 0x24801247, 0xb4ed2b8e, 0x26b6a417
+);
+# elif EXHAUSTIVE_TEST_ORDER == 199
static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
- 0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0,
- 0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15,
- 0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e,
- 0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac
+ 0x226e653f, 0xc8df7744, 0x9bacbf12, 0x7d1dcbf9,
+ 0x87f05b2a, 0xe7edbd28, 0x1f564575, 0xc48dcf18,
+ 0xa13872c2, 0xe933bb17, 0x5d9ffd5b, 0xb5b6e10c,
+ 0x57fe3c00, 0xbaaaa15a, 0xe003ec3e, 0x9c269bae
+);
+static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(
+ 0x2cca28fa, 0xfc614b80, 0x2a3db42b, 0x00ba00b1,
+ 0xbea8d943, 0xdace9ab2, 0x9536daea, 0x0074defb
);
-static const int CURVE_B = 2;
# else
# error No known generator for the specified exhaustive test group order.
# endif
@@ -68,7 +57,7 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
);
-static const int CURVE_B = 7;
+static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7);
#endif
static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) {
@@ -219,14 +208,13 @@ static void secp256k1_ge_clear(secp256k1_ge *r) {
}
static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) {
- secp256k1_fe x2, x3, c;
+ secp256k1_fe x2, x3;
r->x = *x;
secp256k1_fe_sqr(&x2, x);
secp256k1_fe_mul(&x3, x, &x2);
r->infinity = 0;
- secp256k1_fe_set_int(&c, CURVE_B);
- secp256k1_fe_add(&c, &x3);
- return secp256k1_fe_sqrt(&r->y, &c);
+ secp256k1_fe_add(&x3, &secp256k1_fe_const_b);
+ return secp256k1_fe_sqrt(&r->y, &x3);
}
static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) {
@@ -269,36 +257,15 @@ static int secp256k1_gej_is_infinity(const secp256k1_gej *a) {
return a->infinity;
}
-static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) {
- secp256k1_fe y2, x3, z2, z6;
- if (a->infinity) {
- return 0;
- }
- /** y^2 = x^3 + 7
- * (Y/Z^3)^2 = (X/Z^2)^3 + 7
- * Y^2 / Z^6 = X^3 / Z^6 + 7
- * Y^2 = X^3 + 7*Z^6
- */
- secp256k1_fe_sqr(&y2, &a->y);
- secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
- secp256k1_fe_sqr(&z2, &a->z);
- secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2);
- secp256k1_fe_mul_int(&z6, CURVE_B);
- secp256k1_fe_add(&x3, &z6);
- secp256k1_fe_normalize_weak(&x3);
- return secp256k1_fe_equal_var(&y2, &x3);
-}
-
static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
- secp256k1_fe y2, x3, c;
+ secp256k1_fe y2, x3;
if (a->infinity) {
return 0;
}
/* y^2 = x^3 + 7 */
secp256k1_fe_sqr(&y2, &a->y);
secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
- secp256k1_fe_set_int(&c, CURVE_B);
- secp256k1_fe_add(&x3, &c);
+ secp256k1_fe_add(&x3, &secp256k1_fe_const_b);
secp256k1_fe_normalize_weak(&x3);
return secp256k1_fe_equal_var(&y2, &x3);
}
@@ -679,7 +646,6 @@ static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r,
secp256k1_fe_storage_cmov(&r->y, &a->y, flag);
}
-#ifdef USE_ENDOMORPHISM
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
static const secp256k1_fe beta = SECP256K1_FE_CONST(
0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul,
@@ -688,7 +654,6 @@ static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
*r = *a;
secp256k1_fe_mul(&r->x, &r->x, &beta);
}
-#endif
static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) {
secp256k1_fe yz;
@@ -704,4 +669,25 @@ static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) {
return secp256k1_fe_is_quad_var(&yz);
}
+static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge) {
+#ifdef EXHAUSTIVE_TEST_ORDER
+ secp256k1_gej out;
+ int i;
+
+ /* A very simple EC multiplication ladder that avoids a dependency on ecmult. */
+ secp256k1_gej_set_infinity(&out);
+ for (i = 0; i < 32; ++i) {
+ secp256k1_gej_double_var(&out, &out, NULL);
+ if ((((uint32_t)EXHAUSTIVE_TEST_ORDER) >> (31 - i)) & 1) {
+ secp256k1_gej_add_ge_var(&out, &out, ge, NULL);
+ }
+ }
+ return secp256k1_gej_is_infinity(&out);
+#else
+ (void)ge;
+ /* The real secp256k1 group has cofactor 1, so the subgroup is the entire curve. */
+ return 1;
+#endif
+}
+
#endif /* SECP256K1_GROUP_IMPL_H */
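
The subgroup check above is a textbook most-significant-bit-first double-and-add over the 32 bits of the order. The same control flow on plain integers (a self-contained sketch, not part of the diff):

#include <assert.h>
#include <stdint.h>

/* Computes n * x by scanning n's 32 bits from the top, doubling each step;
 * acc plays the role of 'out', 0 the role of the point at infinity. */
static uint64_t ladder_mul(uint32_t n, uint64_t x) {
    uint64_t acc = 0;
    int i;
    for (i = 0; i < 32; ++i) {
        acc += acc;                          /* secp256k1_gej_double_var */
        if ((n >> (31 - i)) & 1) acc += x;   /* secp256k1_gej_add_ge_var */
    }
    return acc;
}

int main(void) {
    assert(ladder_mul(13, 7) == 91);
    /* In the group, order * ge lands on infinity exactly when ge is in the
     * subgroup, which is what the function above tests. */
    return 0;
}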
diff --git a/src/secp256k1/src/modules/ecdh/tests_impl.h b/src/secp256k1/src/modules/ecdh/tests_impl.h
index fe26e8fb69..e8d2aeab9a 100644
--- a/src/secp256k1/src/modules/ecdh/tests_impl.h
+++ b/src/secp256k1/src/modules/ecdh/tests_impl.h
@@ -80,7 +80,7 @@ void test_ecdh_generator_basepoint(void) {
/* compute "explicitly" */
CHECK(secp256k1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1);
/* compare */
- CHECK(memcmp(output_ecdh, point_ser, 65) == 0);
+ CHECK(secp256k1_memcmp_var(output_ecdh, point_ser, 65) == 0);
/* compute using ECDH function with default hash function */
CHECK(secp256k1_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1);
@@ -90,7 +90,7 @@ void test_ecdh_generator_basepoint(void) {
secp256k1_sha256_write(&sha, point_ser, point_ser_len);
secp256k1_sha256_finalize(&sha, output_ser);
/* compare */
- CHECK(memcmp(output_ecdh, output_ser, 32) == 0);
+ CHECK(secp256k1_memcmp_var(output_ecdh, output_ser, 32) == 0);
}
}
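
The many memcmp -> secp256k1_memcmp_var swaps in this and the following test hunks call a byte-wise comparison helper added to util.h (that hunk is outside this excerpt). A sketch of what such a wrapper plausibly looks like; the name and body below are assumptions, not the actual util.h code:

#include <stddef.h>

/* Assumed shape of the wrapper: compare byte by byte with a well-defined
 * (variable-time) result, instead of calling memcmp directly in tests. */
static int memcmp_var_sketch(const void *s1, const void *s2, size_t n) {
    const unsigned char *p1 = s1, *p2 = s2;
    size_t i;
    for (i = 0; i < n; i++) {
        int diff = p1[i] - p2[i];
        if (diff != 0) {
            return diff;
        }
    }
    return 0;
}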
diff --git a/src/secp256k1/src/modules/extrakeys/Makefile.am.include b/src/secp256k1/src/modules/extrakeys/Makefile.am.include
index 8515f92e7a..0d901ec1f4 100644
--- a/src/secp256k1/src/modules/extrakeys/Makefile.am.include
+++ b/src/secp256k1/src/modules/extrakeys/Makefile.am.include
@@ -1,3 +1,4 @@
include_HEADERS += include/secp256k1_extrakeys.h
noinst_HEADERS += src/modules/extrakeys/tests_impl.h
+noinst_HEADERS += src/modules/extrakeys/tests_exhaustive_impl.h
noinst_HEADERS += src/modules/extrakeys/main_impl.h
diff --git a/src/secp256k1/src/modules/extrakeys/main_impl.h b/src/secp256k1/src/modules/extrakeys/main_impl.h
index d319215355..5378d2f301 100644
--- a/src/secp256k1/src/modules/extrakeys/main_impl.h
+++ b/src/secp256k1/src/modules/extrakeys/main_impl.h
@@ -33,6 +33,9 @@ int secp256k1_xonly_pubkey_parse(const secp256k1_context* ctx, secp256k1_xonly_p
if (!secp256k1_ge_set_xo_var(&pk, &x, 0)) {
return 0;
}
+ if (!secp256k1_ge_is_in_correct_subgroup(&pk)) {
+ return 0;
+ }
secp256k1_xonly_pubkey_save(pubkey, &pk);
return 1;
}
@@ -121,7 +124,7 @@ int secp256k1_xonly_pubkey_tweak_add_check(const secp256k1_context* ctx, const u
secp256k1_fe_normalize_var(&pk.y);
secp256k1_fe_get_b32(pk_expected32, &pk.x);
- return memcmp(&pk_expected32, tweaked_pubkey32, 32) == 0
+ return secp256k1_memcmp_var(&pk_expected32, tweaked_pubkey32, 32) == 0
&& secp256k1_fe_is_odd(&pk.y) == tweaked_pk_parity;
}
diff --git a/src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h b/src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h
new file mode 100644
index 0000000000..0e29bc6b09
--- /dev/null
+++ b/src/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h
@@ -0,0 +1,68 @@
+/**********************************************************************
+ * Copyright (c) 2020 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_
+#define _SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_
+
+#include "src/modules/extrakeys/main_impl.h"
+#include "include/secp256k1_extrakeys.h"
+
+static void test_exhaustive_extrakeys(const secp256k1_context *ctx, const secp256k1_ge* group) {
+ secp256k1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1];
+ secp256k1_pubkey pubkey[EXHAUSTIVE_TEST_ORDER - 1];
+ secp256k1_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1];
+ int parities[EXHAUSTIVE_TEST_ORDER - 1];
+ unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32];
+ int i;
+
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
+ secp256k1_fe fe;
+ secp256k1_scalar scalar_i;
+ unsigned char buf[33];
+ int parity;
+
+ secp256k1_scalar_set_int(&scalar_i, i);
+ secp256k1_scalar_get_b32(buf, &scalar_i);
+
+ /* Construct pubkey and keypair. */
+ CHECK(secp256k1_keypair_create(ctx, &keypair[i - 1], buf));
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey[i - 1], buf));
+
+ /* Construct serialized xonly_pubkey from keypair. */
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parities[i - 1], &keypair[i - 1]));
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1]));
+
+ /* Parse the xonly_pubkey back and verify it matches the previously serialized value. */
+ CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pubkey[i - 1], xonly_pubkey_bytes[i - 1]));
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1]));
+ CHECK(secp256k1_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0);
+
+ /* Construct the xonly_pubkey from the pubkey, and verify it matches the same. */
+ CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pubkey[i - 1], &parity, &pubkey[i - 1]));
+ CHECK(parity == parities[i - 1]);
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1]));
+ CHECK(secp256k1_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0);
+
+ /* Compare the xonly_pubkey bytes against the precomputed group. */
+ secp256k1_fe_set_b32(&fe, xonly_pubkey_bytes[i - 1]);
+ CHECK(secp256k1_fe_equal_var(&fe, &group[i].x));
+
+ /* Check the parity against the precomputed group. */
+ fe = group[i].y;
+ secp256k1_fe_normalize_var(&fe);
+ CHECK(secp256k1_fe_is_odd(&fe) == parities[i - 1]);
+
+ /* Verify that the higher half is identical to the lower half mirrored. */
+ if (i > EXHAUSTIVE_TEST_ORDER / 2) {
+ CHECK(secp256k1_memcmp_var(xonly_pubkey_bytes[i - 1], xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - i - 1], 32) == 0);
+ CHECK(parities[i - 1] == 1 - parities[EXHAUSTIVE_TEST_ORDER - i - 1]);
+ }
+ }
+
+ /* TODO: keypair/xonly_pubkey tweak tests */
+}
+
+#endif
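
The mirror check at the end of the loop uses the fact that d*G and (order-d)*G are group inverses: same X coordinate (hence identical x-only serialization), opposite Y parity. The scalar side of that, as a tiny self-check (illustrative only):

#include <assert.h>

int main(void) {
    const int order = 13; /* any odd group order behaves the same way */
    int d;
    for (d = 1; d < order; ++d) {
        int mirror = order - d;
        /* d*G + mirror*G == (d + mirror)*G == the point at infinity,
         * so mirror*G == -(d*G): mirrored across the X axis. */
        assert((d + mirror) % order == 0);
        assert(mirror != d); /* odd order: no point is its own mirror */
    }
    return 0;
}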
diff --git a/src/secp256k1/src/modules/extrakeys/tests_impl.h b/src/secp256k1/src/modules/extrakeys/tests_impl.h
index fc9d40eda1..5ee135849e 100644
--- a/src/secp256k1/src/modules/extrakeys/tests_impl.h
+++ b/src/secp256k1/src/modules/extrakeys/tests_impl.h
@@ -35,9 +35,9 @@ void test_xonly_pubkey(void) {
secp256k1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
- secp256k1_rand256(sk);
+ secp256k1_testrand256(sk);
memset(ones32, 0xFF, 32);
- secp256k1_rand256(xy_sk);
+ secp256k1_testrand256(xy_sk);
CHECK(secp256k1_ec_pubkey_create(sign, &pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1);
@@ -60,7 +60,7 @@ void test_xonly_pubkey(void) {
sk[0] = 1;
CHECK(secp256k1_ec_pubkey_create(ctx, &pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1);
- CHECK(memcmp(&pk, &xonly_pk, sizeof(pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&pk, &xonly_pk, sizeof(pk)) == 0);
CHECK(pk_parity == 0);
/* Choose a secret key such that pubkey and xonly_pubkey are each others
@@ -68,7 +68,7 @@ void test_xonly_pubkey(void) {
sk[0] = 2;
CHECK(secp256k1_ec_pubkey_create(ctx, &pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1);
- CHECK(memcmp(&xonly_pk, &pk, sizeof(xonly_pk)) != 0);
+ CHECK(secp256k1_memcmp_var(&xonly_pk, &pk, sizeof(xonly_pk)) != 0);
CHECK(pk_parity == 1);
secp256k1_pubkey_load(ctx, &pk1, &pk);
secp256k1_pubkey_load(ctx, &pk2, (secp256k1_pubkey *) &xonly_pk);
@@ -81,7 +81,7 @@ void test_xonly_pubkey(void) {
CHECK(secp256k1_xonly_pubkey_serialize(none, NULL, &xonly_pk) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_xonly_pubkey_serialize(none, buf32, NULL) == 0);
- CHECK(memcmp(buf32, zeros64, 32) == 0);
+ CHECK(secp256k1_memcmp_var(buf32, zeros64, 32) == 0);
CHECK(ecount == 2);
{
/* A pubkey filled with 0s will fail to serialize due to pubkey_load
@@ -104,28 +104,28 @@ void test_xonly_pubkey(void) {
CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1);
CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf32, &xonly_pk) == 1);
CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pk_tmp, buf32) == 1);
- CHECK(memcmp(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0);
/* Test parsing invalid field elements */
memset(&xonly_pk, 1, sizeof(xonly_pk));
/* Overflowing field element */
CHECK(secp256k1_xonly_pubkey_parse(none, &xonly_pk, ones32) == 0);
- CHECK(memcmp(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
memset(&xonly_pk, 1, sizeof(xonly_pk));
/* There's no point with x-coordinate 0 on secp256k1 */
CHECK(secp256k1_xonly_pubkey_parse(none, &xonly_pk, zeros64) == 0);
- CHECK(memcmp(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
/* If a random 32-byte string cannot be parsed with ec_pubkey_parse
* (because interpreted as X coordinate it does not correspond to a point on
* the curve) then xonly_pubkey_parse should fail as well. */
for (i = 0; i < count; i++) {
unsigned char rand33[33];
- secp256k1_rand256(&rand33[1]);
+ secp256k1_testrand256(&rand33[1]);
rand33[0] = SECP256K1_TAG_PUBKEY_EVEN;
if (!secp256k1_ec_pubkey_parse(ctx, &pk, rand33, 33)) {
memset(&xonly_pk, 1, sizeof(xonly_pk));
CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 0);
- CHECK(memcmp(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
} else {
CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 1);
}
@@ -154,8 +154,8 @@ void test_xonly_pubkey_tweak(void) {
secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
memset(overflows, 0xff, sizeof(overflows));
- secp256k1_rand256(tweak);
- secp256k1_rand256(sk);
+ secp256k1_testrand256(tweak);
+ secp256k1_testrand256(sk);
CHECK(secp256k1_ec_pubkey_create(ctx, &internal_pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
@@ -170,15 +170,15 @@ void test_xonly_pubkey_tweak(void) {
CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, NULL, tweak) == 0);
CHECK(ecount == 4);
/* NULL internal_xonly_pk zeroes the output_pk */
- CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, NULL) == 0);
CHECK(ecount == 5);
/* NULL tweak zeroes the output_pk */
- CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
/* Invalid tweak zeroes the output_pk */
CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, overflows) == 0);
- CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
/* A zero tweak is fine */
CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, zeros64) == 1);
@@ -193,16 +193,16 @@ void test_xonly_pubkey_tweak(void) {
secp256k1_scalar_get_b32(tweak, &scalar_tweak);
CHECK((secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, sk) == 0)
|| (secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0));
- CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
}
/* Invalid pk with a valid tweak */
memset(&internal_xonly_pk, 0, sizeof(internal_xonly_pk));
- secp256k1_rand256(tweak);
+ secp256k1_testrand256(tweak);
ecount = 0;
CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 1);
- CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
secp256k1_context_destroy(none);
secp256k1_context_destroy(sign);
@@ -228,8 +228,8 @@ void test_xonly_pubkey_tweak_check(void) {
secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
memset(overflows, 0xff, sizeof(overflows));
- secp256k1_rand256(tweak);
- secp256k1_rand256(sk);
+ secp256k1_testrand256(tweak);
+ secp256k1_testrand256(sk);
CHECK(secp256k1_ec_pubkey_create(ctx, &internal_pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
@@ -268,7 +268,7 @@ void test_xonly_pubkey_tweak_check(void) {
/* Overflowing tweak not allowed */
CHECK(secp256k1_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, overflows) == 0);
CHECK(secp256k1_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, overflows) == 0);
- CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(ecount == 5);
secp256k1_context_destroy(none);
@@ -287,7 +287,7 @@ void test_xonly_pubkey_tweak_recursive(void) {
unsigned char tweak[N_PUBKEYS - 1][32];
int i;
- secp256k1_rand256(sk);
+ secp256k1_testrand256(sk);
CHECK(secp256k1_ec_pubkey_create(ctx, &pk[0], sk) == 1);
/* Add tweaks */
for (i = 0; i < N_PUBKEYS - 1; i++) {
@@ -327,51 +327,51 @@ void test_keypair(void) {
/* Test keypair_create */
ecount = 0;
- secp256k1_rand256(sk);
+ secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(none, &keypair, sk) == 0);
- CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_keypair_create(verify, &keypair, sk) == 0);
- CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_keypair_create(sign, &keypair, sk) == 1);
CHECK(secp256k1_keypair_create(sign, NULL, sk) == 0);
CHECK(ecount == 3);
CHECK(secp256k1_keypair_create(sign, &keypair, NULL) == 0);
- CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(ecount == 4);
/* Invalid secret key */
CHECK(secp256k1_keypair_create(sign, &keypair, zeros96) == 0);
- CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(secp256k1_keypair_create(sign, &keypair, overflows) == 0);
- CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
/* Test keypair_pub */
ecount = 0;
- secp256k1_rand256(sk);
+ secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(secp256k1_keypair_pub(none, &pk, &keypair) == 1);
CHECK(secp256k1_keypair_pub(none, NULL, &keypair) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_keypair_pub(none, &pk, NULL) == 0);
CHECK(ecount == 2);
- CHECK(memcmp(zeros96, &pk, sizeof(pk)) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &pk, sizeof(pk)) == 0);
/* Using an invalid keypair is fine for keypair_pub */
memset(&keypair, 0, sizeof(keypair));
CHECK(secp256k1_keypair_pub(none, &pk, &keypair) == 1);
- CHECK(memcmp(zeros96, &pk, sizeof(pk)) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &pk, sizeof(pk)) == 0);
/* keypair holds the same pubkey as pubkey_create */
CHECK(secp256k1_ec_pubkey_create(sign, &pk, sk) == 1);
CHECK(secp256k1_keypair_create(sign, &keypair, sk) == 1);
CHECK(secp256k1_keypair_pub(none, &pk_tmp, &keypair) == 1);
- CHECK(memcmp(&pk, &pk_tmp, sizeof(pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0);
/** Test keypair_xonly_pub **/
ecount = 0;
- secp256k1_rand256(sk);
+ secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1);
CHECK(secp256k1_keypair_xonly_pub(none, NULL, &pk_parity, &keypair) == 0);
@@ -379,13 +379,13 @@ void test_keypair(void) {
CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, NULL, &keypair) == 1);
CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, NULL) == 0);
CHECK(ecount == 2);
- CHECK(memcmp(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
/* Using an invalid keypair will set the xonly_pk to 0 (first reset
* xonly_pk). */
CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1);
memset(&keypair, 0, sizeof(keypair));
CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 0);
- CHECK(memcmp(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
+ CHECK(secp256k1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
CHECK(ecount == 3);
/** keypair holds the same xonly pubkey as pubkey_create **/
@@ -393,7 +393,7 @@ void test_keypair(void) {
CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(secp256k1_keypair_create(sign, &keypair, sk) == 1);
CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk_tmp, &pk_parity_tmp, &keypair) == 1);
- CHECK(memcmp(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0);
CHECK(pk_parity == pk_parity_tmp);
secp256k1_context_destroy(none);
@@ -414,8 +414,8 @@ void test_keypair_add(void) {
secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
CHECK(sizeof(zeros96) == sizeof(keypair));
- secp256k1_rand256(sk);
- secp256k1_rand256(tweak);
+ secp256k1_testrand256(sk);
+ secp256k1_testrand256(tweak);
memset(overflows, 0xFF, 32);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
@@ -429,12 +429,12 @@ void test_keypair_add(void) {
CHECK(secp256k1_keypair_xonly_tweak_add(verify, &keypair, NULL) == 0);
CHECK(ecount == 4);
/* This does not set the keypair to zeroes */
- CHECK(memcmp(&keypair, zeros96, sizeof(keypair)) != 0);
+ CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) != 0);
/* Invalid tweak zeroes the keypair */
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(secp256k1_keypair_xonly_tweak_add(ctx, &keypair, overflows) == 0);
- CHECK(memcmp(&keypair, zeros96, sizeof(keypair)) == 0);
+ CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0);
/* A zero tweak is fine */
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
@@ -444,7 +444,7 @@ void test_keypair_add(void) {
for (i = 0; i < count; i++) {
secp256k1_scalar scalar_tweak;
secp256k1_keypair keypair_tmp;
- secp256k1_rand256(sk);
+ secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
memcpy(&keypair_tmp, &keypair, sizeof(keypair));
/* Because sk may be negated before adding, we need to try with tweak =
@@ -454,17 +454,17 @@ void test_keypair_add(void) {
secp256k1_scalar_get_b32(tweak, &scalar_tweak);
CHECK((secp256k1_keypair_xonly_tweak_add(ctx, &keypair, sk) == 0)
|| (secp256k1_keypair_xonly_tweak_add(ctx, &keypair_tmp, tweak) == 0));
- CHECK(memcmp(&keypair, zeros96, sizeof(keypair)) == 0
- || memcmp(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0);
+ CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0
+ || secp256k1_memcmp_var(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0);
}
/* Invalid keypair with a valid tweak */
memset(&keypair, 0, sizeof(keypair));
- secp256k1_rand256(tweak);
+ secp256k1_testrand256(tweak);
ecount = 0;
CHECK(secp256k1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0);
CHECK(ecount == 1);
- CHECK(memcmp(&keypair, zeros96, sizeof(keypair)) == 0);
+ CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0);
/* Only seckey part of keypair invalid */
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
memset(&keypair, 0, 32);
@@ -486,7 +486,7 @@ void test_keypair_add(void) {
unsigned char pk32[32];
int pk_parity;
- secp256k1_rand256(tweak);
+ secp256k1_testrand256(tweak);
CHECK(secp256k1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1);
CHECK(secp256k1_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1);
CHECK(secp256k1_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1);
@@ -498,11 +498,11 @@ void test_keypair_add(void) {
/* Check that the resulting pubkey matches xonly_pubkey_tweak_add */
CHECK(secp256k1_keypair_pub(ctx, &output_pk_xy, &keypair) == 1);
CHECK(secp256k1_xonly_pubkey_tweak_add(ctx, &output_pk_expected, &internal_pk, tweak) == 1);
- CHECK(memcmp(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
+ CHECK(secp256k1_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
/* Check that the secret key in the keypair is tweaked correctly */
CHECK(secp256k1_ec_pubkey_create(ctx, &output_pk_expected, &keypair.data[0]) == 1);
- CHECK(memcmp(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
+ CHECK(secp256k1_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
}
secp256k1_context_destroy(none);
secp256k1_context_destroy(sign);
diff --git a/src/secp256k1/src/modules/recovery/Makefile.am.include b/src/secp256k1/src/modules/recovery/Makefile.am.include
index bf23c26e71..e2d3f1248d 100644
--- a/src/secp256k1/src/modules/recovery/Makefile.am.include
+++ b/src/secp256k1/src/modules/recovery/Makefile.am.include
@@ -1,6 +1,7 @@
include_HEADERS += include/secp256k1_recovery.h
noinst_HEADERS += src/modules/recovery/main_impl.h
noinst_HEADERS += src/modules/recovery/tests_impl.h
+noinst_HEADERS += src/modules/recovery/tests_exhaustive_impl.h
if USE_BENCHMARK
noinst_PROGRAMS += bench_recover
bench_recover_SOURCES = src/bench_recover.c
diff --git a/src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h b/src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h
new file mode 100644
index 0000000000..a2f381d77a
--- /dev/null
+++ b/src/secp256k1/src/modules/recovery/tests_exhaustive_impl.h
@@ -0,0 +1,149 @@
+/**********************************************************************
+ * Copyright (c) 2016 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H
+#define SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H
+
+#include "src/modules/recovery/main_impl.h"
+#include "include/secp256k1_recovery.h"
+
+void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group) {
+ int i, j, k;
+ uint64_t iter = 0;
+
+ /* Loop */
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { /* message */
+ for (j = 1; j < EXHAUSTIVE_TEST_ORDER; j++) { /* key */
+ if (skip_section(&iter)) continue;
+ for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */
+ const int starting_k = k;
+ secp256k1_fe r_dot_y_normalized;
+ secp256k1_ecdsa_recoverable_signature rsig;
+ secp256k1_ecdsa_signature sig;
+ secp256k1_scalar sk, msg, r, s, expected_r;
+ unsigned char sk32[32], msg32[32];
+ int expected_recid;
+ int recid;
+ int overflow;
+ secp256k1_scalar_set_int(&msg, i);
+ secp256k1_scalar_set_int(&sk, j);
+ secp256k1_scalar_get_b32(sk32, &sk);
+ secp256k1_scalar_get_b32(msg32, &msg);
+
+ secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
+
+ /* Check directly */
+ secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
+ r_from_k(&expected_r, group, k, &overflow);
+ CHECK(r == expected_r);
+ CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
+ (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);
+ /* The recid's second bit is for conveying overflow (R.x value >= group order).
+ * In the actual secp256k1 this is an astronomically unlikely event, but in the
+ * small group used here, it will be the case for all points except the ones where
+ * R.x=1 (which the group is specifically selected to have).
+ * Note that this isn't actually useful; full recovery would need to convey
+ * floor(R.x / group_order), but only one bit is used as that is sufficient
+ * in the real group. */
+ expected_recid = overflow ? 2 : 0;
+ r_dot_y_normalized = group[k].y;
+ secp256k1_fe_normalize(&r_dot_y_normalized);
+ /* Also the recovery id is flipped depending if we hit the low-s branch */
+ if ((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER) {
+ expected_recid |= secp256k1_fe_is_odd(&r_dot_y_normalized);
+ } else {
+ expected_recid |= !secp256k1_fe_is_odd(&r_dot_y_normalized);
+ }
+ CHECK(recid == expected_recid);
+
+ /* Convert to a standard sig then check */
+ secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
+ /* Note that we compute expected_r *after* signing -- this is important
+ * because our nonce-computing function might change k during
+ * signing. */
+ r_from_k(&expected_r, group, k, NULL);
+ CHECK(r == expected_r);
+ CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
+ (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);
+
+ /* Overflow means we've tried every possible nonce */
+ if (k < starting_k) {
+ break;
+ }
+ }
+ }
+ }
+}
+
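
The two CHECKs on (k*s) verify the ECDSA signing relation s == k^-1 * (m + r*sk) without computing an inverse, accepting either s or its negation (the low-s normalization). A worked instance mod 13 (illustrative numbers only):

#include <assert.h>

int main(void) {
    const int n = 13;
    int m = 5, sk = 3, r = 11, k = 7;   /* r would be (k*G).x reduced mod n */
    /* k^-1 == 2 since 7*2 == 14 == 1 (mod 13), so
     * s = k^-1 * (m + r*sk) = 2 * (5 + 33) = 76 == 11 (mod 13). */
    int s = (2 * (m + r * sk)) % n;
    assert((k * s) % n == (m + r * sk) % n);
    return 0;
}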
+void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group) {
+ /* This is essentially a copy of test_exhaustive_verify, with recovery added */
+ int s, r, msg, key;
+ uint64_t iter = 0;
+ for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) {
+ for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) {
+ for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) {
+ for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) {
+ secp256k1_ge nonconst_ge;
+ secp256k1_ecdsa_recoverable_signature rsig;
+ secp256k1_ecdsa_signature sig;
+ secp256k1_pubkey pk;
+ secp256k1_scalar sk_s, msg_s, r_s, s_s;
+ secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
+ int recid = 0;
+ int k, should_verify;
+ unsigned char msg32[32];
+
+ if (skip_section(&iter)) continue;
+
+ secp256k1_scalar_set_int(&s_s, s);
+ secp256k1_scalar_set_int(&r_s, r);
+ secp256k1_scalar_set_int(&msg_s, msg);
+ secp256k1_scalar_set_int(&sk_s, key);
+ secp256k1_scalar_get_b32(msg32, &msg_s);
+
+ /* Verify by hand */
+ /* Run through every k value that gives us this r and check that *one* works.
+ * Note there could be none, there could be multiple, ECDSA is weird. */
+ should_verify = 0;
+ for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
+ secp256k1_scalar check_x_s;
+ r_from_k(&check_x_s, group, k, NULL);
+ if (r_s == check_x_s) {
+ secp256k1_scalar_set_int(&s_times_k_s, k);
+ secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
+ secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
+ secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
+ should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
+ }
+ }
+ /* nb we have a "high s" rule */
+ should_verify &= !secp256k1_scalar_is_high(&s_s);
+
+ /* We would like to try recovering the pubkey and checking that it matches,
+ * but pubkey recovery is impossible in the exhaustive tests (the reason
+ * being that there are 12 nonzero r values, 12 nonzero points, and no
+ * overlap between the sets, so there are no valid signatures). */
+
+ /* Verify by converting to a standard signature and calling verify */
+ secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
+ secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
+ memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
+ secp256k1_pubkey_save(&pk, &nonconst_ge);
+ CHECK(should_verify ==
+ secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
+ }
+ }
+ }
+ }
+}
+
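
The "high s" rule the comment mentions: s and n - s yield the same R.x, so verifiers reject the larger of the two. A scalar sketch of the predicate (mirroring what secp256k1_scalar_is_high checks, with n the group order):

#include <assert.h>

int main(void) {
    const int n = 13;
    int s;
    for (s = 1; s < n; ++s) {
        int is_high = s > n / 2;         /* true for s in 7..12 when n = 13 */
        /* Exactly one of s and n - s is high, so normalizing to the low
         * value is always possible and unambiguous. */
        assert(is_high != (n - s > n / 2));
    }
    return 0;
}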
+static void test_exhaustive_recovery(const secp256k1_context *ctx, const secp256k1_ge *group) {
+ test_exhaustive_recovery_sign(ctx, group);
+ test_exhaustive_recovery_verify(ctx, group);
+}
+
+#endif /* SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H */
diff --git a/src/secp256k1/src/modules/recovery/tests_impl.h b/src/secp256k1/src/modules/recovery/tests_impl.h
index 38a533a755..09cae38403 100644
--- a/src/secp256k1/src/modules/recovery/tests_impl.h
+++ b/src/secp256k1/src/modules/recovery/tests_impl.h
@@ -25,7 +25,7 @@ static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned c
}
/* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. */
memset(nonce32, 1, 32);
- return secp256k1_rand_bits(1);
+ return secp256k1_testrand_bits(1);
}
void test_ecdsa_recovery_api(void) {
@@ -184,7 +184,7 @@ void test_ecdsa_recovery_end_to_end(void) {
CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
- CHECK(memcmp(&signature[4], &signature[0], 64) == 0);
+ CHECK(secp256k1_memcmp_var(&signature[4], &signature[0], 64) == 0);
CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
memset(&rsignature[4], 0, sizeof(rsignature[4]));
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
@@ -193,16 +193,16 @@ void test_ecdsa_recovery_end_to_end(void) {
/* Parse compact (with recovery id) and recover. */
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1);
- CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
/* Serialize/destroy/parse signature and verify again. */
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
- sig[secp256k1_rand_bits(6)] += 1 + secp256k1_rand_int(255);
+ sig[secp256k1_testrand_bits(6)] += 1 + secp256k1_testrand_int(255);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0);
/* Recover again */
CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 ||
- memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
+ secp256k1_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
}
/* Tests several edge cases. */
diff --git a/src/secp256k1/src/modules/schnorrsig/Makefile.am.include b/src/secp256k1/src/modules/schnorrsig/Makefile.am.include
index a82bafe43f..568bcc3523 100644
--- a/src/secp256k1/src/modules/schnorrsig/Makefile.am.include
+++ b/src/secp256k1/src/modules/schnorrsig/Makefile.am.include
@@ -1,6 +1,7 @@
include_HEADERS += include/secp256k1_schnorrsig.h
noinst_HEADERS += src/modules/schnorrsig/main_impl.h
noinst_HEADERS += src/modules/schnorrsig/tests_impl.h
+noinst_HEADERS += src/modules/schnorrsig/tests_exhaustive_impl.h
if USE_BENCHMARK
noinst_PROGRAMS += bench_schnorrsig
bench_schnorrsig_SOURCES = src/bench_schnorrsig.c
diff --git a/src/secp256k1/src/modules/schnorrsig/main_impl.h b/src/secp256k1/src/modules/schnorrsig/main_impl.h
index a0218f881a..b0d8481f9b 100644
--- a/src/secp256k1/src/modules/schnorrsig/main_impl.h
+++ b/src/secp256k1/src/modules/schnorrsig/main_impl.h
@@ -68,7 +68,7 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms
/* Tag the hash with algo16 which is important to avoid nonce reuse across
* algorithms. If this nonce function is used in BIP-340 signing as defined
* in the spec, an optimized tagging implementation is used. */
- if (memcmp(algo16, bip340_algo16, 16) == 0) {
+ if (secp256k1_memcmp_var(algo16, bip340_algo16, 16) == 0) {
secp256k1_nonce_function_bip340_sha256_tagged(&sha);
} else {
int algo16_len = 16;
@@ -108,6 +108,22 @@ static void secp256k1_schnorrsig_sha256_tagged(secp256k1_sha256 *sha) {
sha->bytes = 64;
}
+static void secp256k1_schnorrsig_challenge(secp256k1_scalar* e, const unsigned char *r32, const unsigned char *msg32, const unsigned char *pubkey32)
+{
+ unsigned char buf[32];
+ secp256k1_sha256 sha;
+
+ /* tagged hash(r.x, pk.x, msg32) */
+ secp256k1_schnorrsig_sha256_tagged(&sha);
+ secp256k1_sha256_write(&sha, r32, 32);
+ secp256k1_sha256_write(&sha, pubkey32, 32);
+ secp256k1_sha256_write(&sha, msg32, 32);
+ secp256k1_sha256_finalize(&sha, buf);
+ /* Set scalar e to the challenge hash modulo the curve order as per
+ * BIP340. */
+ secp256k1_scalar_set_b32(e, buf, NULL);
+}
+
int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const secp256k1_keypair *keypair, secp256k1_nonce_function_hardened noncefp, void *ndata) {
secp256k1_scalar sk;
secp256k1_scalar e;
@@ -115,7 +131,6 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64
secp256k1_gej rj;
secp256k1_ge pk;
secp256k1_ge r;
- secp256k1_sha256 sha;
unsigned char buf[32] = { 0 };
unsigned char pk_buf[32];
unsigned char seckey[32];
@@ -159,16 +174,7 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64
secp256k1_fe_normalize_var(&r.x);
secp256k1_fe_get_b32(&sig64[0], &r.x);
- /* tagged hash(r.x, pk.x, msg32) */
- secp256k1_schnorrsig_sha256_tagged(&sha);
- secp256k1_sha256_write(&sha, &sig64[0], 32);
- secp256k1_sha256_write(&sha, pk_buf, sizeof(pk_buf));
- secp256k1_sha256_write(&sha, msg32, 32);
- secp256k1_sha256_finalize(&sha, buf);
-
- /* Set scalar e to the challenge hash modulo the curve order as per
- * BIP340. */
- secp256k1_scalar_set_b32(&e, buf, NULL);
+ secp256k1_schnorrsig_challenge(&e, &sig64[0], msg32, pk_buf);
secp256k1_scalar_mul(&e, &e, &sk);
secp256k1_scalar_add(&e, &e, &k);
secp256k1_scalar_get_b32(&sig64[32], &e);
@@ -189,7 +195,6 @@ int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned cha
secp256k1_gej pkj;
secp256k1_fe rx;
secp256k1_ge r;
- secp256k1_sha256 sha;
unsigned char buf[32];
int overflow;
@@ -212,13 +217,9 @@ int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned cha
return 0;
}
- secp256k1_schnorrsig_sha256_tagged(&sha);
- secp256k1_sha256_write(&sha, &sig64[0], 32);
+ /* Compute e. */
secp256k1_fe_get_b32(buf, &pk.x);
- secp256k1_sha256_write(&sha, buf, sizeof(buf));
- secp256k1_sha256_write(&sha, msg32, 32);
- secp256k1_sha256_finalize(&sha, buf);
- secp256k1_scalar_set_b32(&e, buf, NULL);
+ secp256k1_schnorrsig_challenge(&e, &sig64[0], msg32, buf);
/* Compute rj = s*G + (-e)*pkj */
secp256k1_scalar_negate(&e, &e);
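
With the challenge computation factored into secp256k1_schnorrsig_challenge, signer and verifier now share one definition of e = H_tag(r.x || pk.x || m) mod n. The scalar relation that makes the two sides agree, as a small worked check (numbers illustrative; e would come from the tagged hash):

#include <assert.h>

int main(void) {
    const int n = 13;
    int k = 5, sk = 3, e = 4;
    /* Sign: s = k + e*sk (mod n); here s = 5 + 12 = 17 == 4 (mod 13).
     * Verify: R = s*G - e*P must equal k*G, i.e. s - e*sk == k (mod n). */
    int s = (k + e * sk) % n;
    assert((s + n * n - e * sk) % n == k % n);
    return 0;
}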
diff --git a/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h b/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h
new file mode 100644
index 0000000000..4bf0bc1680
--- /dev/null
+++ b/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h
@@ -0,0 +1,206 @@
+/**********************************************************************
+ * Copyright (c) 2020 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_
+#define _SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_
+
+#include "include/secp256k1_schnorrsig.h"
+#include "src/modules/schnorrsig/main_impl.h"
+
+static const unsigned char invalid_pubkey_bytes[][32] = {
+ /* 0 */
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+ /* 2 */
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2
+ },
+ /* order */
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ((EXHAUSTIVE_TEST_ORDER + 0UL) >> 24) & 0xFF,
+ ((EXHAUSTIVE_TEST_ORDER + 0UL) >> 16) & 0xFF,
+ ((EXHAUSTIVE_TEST_ORDER + 0UL) >> 8) & 0xFF,
+ (EXHAUSTIVE_TEST_ORDER + 0UL) & 0xFF
+ },
+ /* order + 1 */
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ((EXHAUSTIVE_TEST_ORDER + 1UL) >> 24) & 0xFF,
+ ((EXHAUSTIVE_TEST_ORDER + 1UL) >> 16) & 0xFF,
+ ((EXHAUSTIVE_TEST_ORDER + 1UL) >> 8) & 0xFF,
+ (EXHAUSTIVE_TEST_ORDER + 1UL) & 0xFF
+ },
+ /* field size */
+ {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x2F
+ },
+ /* field size + 1 (note that 1 is legal) */
+ {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x30
+ },
+ /* 2^256 - 1 */
+ {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+ }
+};
+
+#define NUM_INVALID_KEYS (sizeof(invalid_pubkey_bytes) / sizeof(invalid_pubkey_bytes[0]))
+
+static int secp256k1_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
+ const unsigned char *key32, const unsigned char *xonly_pk32,
+ const unsigned char *algo16, void* data) {
+ secp256k1_scalar s;
+ int *idata = data;
+ (void)msg32;
+ (void)key32;
+ (void)xonly_pk32;
+ (void)algo16;
+ secp256k1_scalar_set_int(&s, *idata);
+ secp256k1_scalar_get_b32(nonce32, &s);
+ return 1;
+}
+
+static void test_exhaustive_schnorrsig_verify(const secp256k1_context *ctx, const secp256k1_xonly_pubkey* pubkeys, unsigned char (*xonly_pubkey_bytes)[32], const int* parities) {
+ int d;
+ uint64_t iter = 0;
+ /* Iterate over the possible public keys to verify against (through their corresponding DL d). */
+ for (d = 1; d <= EXHAUSTIVE_TEST_ORDER / 2; ++d) {
+ int actual_d;
+ unsigned k;
+ unsigned char pk32[32];
+ memcpy(pk32, xonly_pubkey_bytes[d - 1], 32);
+ actual_d = parities[d - 1] ? EXHAUSTIVE_TEST_ORDER - d : d;
+ /* Iterate over the possible valid first 32 bytes in the signature, through their corresponding DL k.
+ Values above EXHAUSTIVE_TEST_ORDER/2 refer to the entries in invalid_pubkey_bytes. */
+ for (k = 1; k <= EXHAUSTIVE_TEST_ORDER / 2 + NUM_INVALID_KEYS; ++k) {
+ unsigned char sig64[64];
+ int actual_k = -1;
+ int e_done[EXHAUSTIVE_TEST_ORDER] = {0};
+ int e_count_done = 0;
+ if (skip_section(&iter)) continue;
+ if (k <= EXHAUSTIVE_TEST_ORDER / 2) {
+ memcpy(sig64, xonly_pubkey_bytes[k - 1], 32);
+ actual_k = parities[k - 1] ? EXHAUSTIVE_TEST_ORDER - k : k;
+ } else {
+ memcpy(sig64, invalid_pubkey_bytes[k - 1 - EXHAUSTIVE_TEST_ORDER / 2], 32);
+ }
+ /* Randomly generate messages until all challenges have been hit. */
+ while (e_count_done < EXHAUSTIVE_TEST_ORDER) {
+ secp256k1_scalar e;
+ unsigned char msg32[32];
+ secp256k1_testrand256(msg32);
+ secp256k1_schnorrsig_challenge(&e, sig64, msg32, pk32);
+ /* Only do work if we hit a challenge we haven't tried before. */
+ if (!e_done[e]) {
+                    /* Iterate over the possible last 32 bytes in the signature:
+                       s = 0..order encodes that s value; s = order+1 means random bytes. */
+ int count_valid = 0, s;
+ for (s = 0; s <= EXHAUSTIVE_TEST_ORDER + 1; ++s) {
+ int expect_valid, valid;
+ if (s <= EXHAUSTIVE_TEST_ORDER) {
+ secp256k1_scalar s_s;
+ secp256k1_scalar_set_int(&s_s, s);
+ secp256k1_scalar_get_b32(sig64 + 32, &s_s);
+ expect_valid = actual_k != -1 && s != EXHAUSTIVE_TEST_ORDER &&
+ (s_s == (actual_k + actual_d * e) % EXHAUSTIVE_TEST_ORDER);
+ } else {
+ secp256k1_testrand256(sig64 + 32);
+ expect_valid = 0;
+ }
+ valid = secp256k1_schnorrsig_verify(ctx, sig64, msg32, &pubkeys[d - 1]);
+ CHECK(valid == expect_valid);
+ count_valid += valid;
+ }
+ /* Exactly one s value must verify, unless R is illegal. */
+ CHECK(count_valid == (actual_k != -1));
+ /* Don't retry other messages that result in the same challenge. */
+ e_done[e] = 1;
+ ++e_count_done;
+ }
+ }
+ }
+ }
+}
+
+static void test_exhaustive_schnorrsig_sign(const secp256k1_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const secp256k1_keypair* keypairs, const int* parities) {
+ int d, k;
+ uint64_t iter = 0;
+ /* Loop over keys. */
+ for (d = 1; d < EXHAUSTIVE_TEST_ORDER; ++d) {
+ int actual_d = d;
+ if (parities[d - 1]) actual_d = EXHAUSTIVE_TEST_ORDER - d;
+ /* Loop over nonces. */
+ for (k = 1; k < EXHAUSTIVE_TEST_ORDER; ++k) {
+ int e_done[EXHAUSTIVE_TEST_ORDER] = {0};
+ int e_count_done = 0;
+ unsigned char msg32[32];
+ unsigned char sig64[64];
+ int actual_k = k;
+ if (skip_section(&iter)) continue;
+ if (parities[k - 1]) actual_k = EXHAUSTIVE_TEST_ORDER - k;
+ /* Generate random messages until all challenges have been tried. */
+ while (e_count_done < EXHAUSTIVE_TEST_ORDER) {
+ secp256k1_scalar e;
+ secp256k1_testrand256(msg32);
+ secp256k1_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, xonly_pubkey_bytes[d - 1]);
+ /* Only do work if we hit a challenge we haven't tried before. */
+ if (!e_done[e]) {
+ secp256k1_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER;
+ unsigned char expected_s_bytes[32];
+ secp256k1_scalar_get_b32(expected_s_bytes, &expected_s);
+ /* Invoke the real function to construct a signature. */
+ CHECK(secp256k1_schnorrsig_sign(ctx, sig64, msg32, &keypairs[d - 1], secp256k1_hardened_nonce_function_smallint, &k));
+ /* The first 32 bytes must match the xonly pubkey for the specified k. */
+ CHECK(secp256k1_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0);
+ /* The last 32 bytes must match the expected s value. */
+ CHECK(secp256k1_memcmp_var(sig64 + 32, expected_s_bytes, 32) == 0);
+ /* Don't retry other messages that result in the same challenge. */
+ e_done[e] = 1;
+ ++e_count_done;
+ }
+ }
+ }
+ }
+}
+
+static void test_exhaustive_schnorrsig(const secp256k1_context *ctx) {
+ secp256k1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1];
+ secp256k1_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1];
+ int parity[EXHAUSTIVE_TEST_ORDER - 1];
+ unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32];
+ unsigned i;
+
+ /* Verify that all invalid_pubkey_bytes are actually invalid. */
+ for (i = 0; i < NUM_INVALID_KEYS; ++i) {
+ secp256k1_xonly_pubkey pk;
+ CHECK(!secp256k1_xonly_pubkey_parse(ctx, &pk, invalid_pubkey_bytes[i]));
+ }
+
+ /* Construct keypairs and xonly-pubkeys for the entire group. */
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; ++i) {
+ secp256k1_scalar scalar_i;
+ unsigned char buf[32];
+ secp256k1_scalar_set_int(&scalar_i, i);
+ secp256k1_scalar_get_b32(buf, &scalar_i);
+ CHECK(secp256k1_keypair_create(ctx, &keypair[i - 1], buf));
+ CHECK(secp256k1_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parity[i - 1], &keypair[i - 1]));
+ CHECK(secp256k1_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1]));
+ }
+
+ test_exhaustive_schnorrsig_sign(ctx, xonly_pubkey_bytes, keypair, parity);
+ test_exhaustive_schnorrsig_verify(ctx, xonly_pubkey, xonly_pubkey_bytes, parity);
+}
+
+#endif
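What the exhaustive loops above ultimately check is the Schnorr identity s*G == R + e*P. In a group of order 13 this can be replayed with plain integers by identifying every point with its discrete log; the following toy sketch shows only that core relation and deliberately ignores x-only parity negation and the invalid-R entries:

#include <stdio.h>

#define ORDER 13 /* same as EXHAUSTIVE_TEST_ORDER == 13 */

/* Writing the group additively and identifying each point with its
 * discrete log (P = d*G, R = k*G), the Schnorr verification equation
 * s*G == R + e*P collapses to integers: s == k + e*d (mod 13). */
static int toy_schnorr_verify(int d, int k, int e, int s) {
    return s == (k + e * d) % ORDER;
}

int main(void) {
    int d = 5, k = 7, e = 11;
    int s = (k + e * d) % ORDER; /* "signing": s = k + e*d mod order */
    printf("honest sig valid:   %d\n", toy_schnorr_verify(d, k, e, s));               /* 1 */
    printf("tampered sig valid: %d\n", toy_schnorr_verify(d, k, e, (s + 1) % ORDER)); /* 0 */
    return 0;
}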
diff --git a/src/secp256k1/src/modules/schnorrsig/tests_impl.h b/src/secp256k1/src/modules/schnorrsig/tests_impl.h
index 88d8f56404..f522fcb320 100644
--- a/src/secp256k1/src/modules/schnorrsig/tests_impl.h
+++ b/src/secp256k1/src/modules/schnorrsig/tests_impl.h
@@ -15,9 +15,9 @@
void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes) {
unsigned char nonces[2][32];
CHECK(nonce_function_bip340(nonces[0], args[0], args[1], args[2], args[3], args[4]) == 1);
- secp256k1_rand_flip(args[n_flip], n_bytes);
+ secp256k1_testrand_flip(args[n_flip], n_bytes);
CHECK(nonce_function_bip340(nonces[1], args[0], args[1], args[2], args[3], args[4]) == 1);
- CHECK(memcmp(nonces[0], nonces[1], 32) != 0);
+ CHECK(secp256k1_memcmp_var(nonces[0], nonces[1], 32) != 0);
}
/* Tests for the equality of two sha256 structs. This function only produces a
@@ -28,7 +28,7 @@ void test_sha256_eq(const secp256k1_sha256 *sha1, const secp256k1_sha256 *sha2)
CHECK((sha1->bytes & 0x3F) == 0);
CHECK(sha1->bytes == sha2->bytes);
- CHECK(memcmp(sha1->s, sha2->s, sizeof(sha1->s)) == 0);
+ CHECK(secp256k1_memcmp_var(sha1->s, sha2->s, sizeof(sha1->s)) == 0);
}
void run_nonce_function_bip340_tests(void) {
@@ -59,10 +59,10 @@ void run_nonce_function_bip340_tests(void) {
secp256k1_nonce_function_bip340_sha256_tagged_aux(&sha_optimized);
test_sha256_eq(&sha, &sha_optimized);
- secp256k1_rand256(msg);
- secp256k1_rand256(key);
- secp256k1_rand256(pk);
- secp256k1_rand256(aux_rand);
+ secp256k1_testrand256(msg);
+ secp256k1_testrand256(key);
+ secp256k1_testrand256(pk);
+ secp256k1_testrand256(aux_rand);
/* Check that a bitflip in an argument results in different nonces. */
args[0] = msg;
@@ -124,10 +124,10 @@ void test_schnorrsig_api(void) {
secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
- secp256k1_rand256(sk1);
- secp256k1_rand256(sk2);
- secp256k1_rand256(sk3);
- secp256k1_rand256(msg);
+ secp256k1_testrand256(sk1);
+ secp256k1_testrand256(sk2);
+ secp256k1_testrand256(sk3);
+ secp256k1_testrand256(msg);
CHECK(secp256k1_keypair_create(ctx, &keypairs[0], sk1) == 1);
CHECK(secp256k1_keypair_create(ctx, &keypairs[1], sk2) == 1);
CHECK(secp256k1_keypair_create(ctx, &keypairs[2], sk3) == 1);
@@ -197,11 +197,11 @@ void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const un
CHECK(secp256k1_keypair_create(ctx, &keypair, sk));
CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, aux_rand));
- CHECK(memcmp(sig, expected_sig, 64) == 0);
+ CHECK(secp256k1_memcmp_var(sig, expected_sig, 64) == 0);
CHECK(secp256k1_xonly_pubkey_parse(ctx, &pk_expected, pk_serialized));
CHECK(secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair));
- CHECK(memcmp(&pk, &pk_expected, sizeof(pk)) == 0);
+ CHECK(secp256k1_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0);
CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, &pk));
}
@@ -675,19 +675,19 @@ void test_schnorrsig_sign(void) {
unsigned char sig[64];
unsigned char zeros64[64] = { 0 };
- secp256k1_rand256(sk);
+ secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk));
CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1);
/* Test different nonce functions */
memset(sig, 1, sizeof(sig));
CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_failing, NULL) == 0);
- CHECK(memcmp(sig, zeros64, sizeof(sig)) == 0);
+ CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) == 0);
memset(&sig, 1, sizeof(sig));
CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_0, NULL) == 0);
- CHECK(memcmp(sig, zeros64, sizeof(sig)) == 0);
+ CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) == 0);
CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_overflowing, NULL) == 1);
- CHECK(memcmp(sig, zeros64, sizeof(sig)) != 0);
+ CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) != 0);
}
#define N_SIGS 3
@@ -703,12 +703,12 @@ void test_schnorrsig_sign_verify(void) {
secp256k1_xonly_pubkey pk;
secp256k1_scalar s;
- secp256k1_rand256(sk);
+ secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk));
CHECK(secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair));
for (i = 0; i < N_SIGS; i++) {
- secp256k1_rand256(msg[i]);
+ secp256k1_testrand256(msg[i]);
CHECK(secp256k1_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL, NULL));
CHECK(secp256k1_schnorrsig_verify(ctx, sig[i], msg[i], &pk));
}
@@ -716,19 +716,19 @@ void test_schnorrsig_sign_verify(void) {
{
/* Flip a few bits in the signature and in the message and check that
* verify and verify_batch (TODO) fail */
- size_t sig_idx = secp256k1_rand_int(N_SIGS);
- size_t byte_idx = secp256k1_rand_int(32);
- unsigned char xorbyte = secp256k1_rand_int(254)+1;
+ size_t sig_idx = secp256k1_testrand_int(N_SIGS);
+ size_t byte_idx = secp256k1_testrand_int(32);
+ unsigned char xorbyte = secp256k1_testrand_int(254)+1;
sig[sig_idx][byte_idx] ^= xorbyte;
CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
sig[sig_idx][byte_idx] ^= xorbyte;
- byte_idx = secp256k1_rand_int(32);
+ byte_idx = secp256k1_testrand_int(32);
sig[sig_idx][32+byte_idx] ^= xorbyte;
CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
sig[sig_idx][32+byte_idx] ^= xorbyte;
- byte_idx = secp256k1_rand_int(32);
+ byte_idx = secp256k1_testrand_int(32);
msg[sig_idx][byte_idx] ^= xorbyte;
CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
msg[sig_idx][byte_idx] ^= xorbyte;
@@ -766,7 +766,7 @@ void test_schnorrsig_taproot(void) {
unsigned char sig[64];
/* Create output key */
- secp256k1_rand256(sk);
+ secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(secp256k1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1);
/* In actual taproot the tweak would be hash of internal_pk */
@@ -776,7 +776,7 @@ void test_schnorrsig_taproot(void) {
CHECK(secp256k1_xonly_pubkey_serialize(ctx, output_pk_bytes, &output_pk) == 1);
/* Key spend */
- secp256k1_rand256(msg);
+ secp256k1_testrand256(msg);
CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1);
/* Verify key spend */
CHECK(secp256k1_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1);
diff --git a/src/secp256k1/src/scalar.h b/src/secp256k1/src/scalar.h
index 95d3e326c9..fb3fb187ce 100644
--- a/src/secp256k1/src/scalar.h
+++ b/src/secp256k1/src/scalar.h
@@ -102,12 +102,11 @@ static void secp256k1_scalar_order_get_num(secp256k1_num *r);
/** Compare two scalars. */
static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b);
-#ifdef USE_ENDOMORPHISM
-/** Find r1 and r2 such that r1+r2*2^128 = a. */
-static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
-/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see secp256k1_gej_mul_lambda). */
-static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
-#endif
+/** Find r1 and r2 such that r1+r2*2^128 = k. */
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k);
+/** Find r1 and r2 such that r1+r2*lambda = k,
+ * where r1 and r2 or their negations are maximum 128 bits long (see secp256k1_ge_mul_lambda). */
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k);
/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift);
diff --git a/src/secp256k1/src/scalar_4x64_impl.h b/src/secp256k1/src/scalar_4x64_impl.h
index 7f39927861..73cbd5e18a 100644
--- a/src/secp256k1/src/scalar_4x64_impl.h
+++ b/src/secp256k1/src/scalar_4x64_impl.h
@@ -912,18 +912,16 @@ static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a)
secp256k1_scalar_reduce_512(r, l);
}
-#ifdef USE_ENDOMORPHISM
-static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
- r1->d[0] = a->d[0];
- r1->d[1] = a->d[1];
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
+ r1->d[0] = k->d[0];
+ r1->d[1] = k->d[1];
r1->d[2] = 0;
r1->d[3] = 0;
- r2->d[0] = a->d[2];
- r2->d[1] = a->d[3];
+ r2->d[0] = k->d[2];
+ r2->d[1] = k->d[3];
r2->d[2] = 0;
r2->d[3] = 0;
}
-#endif
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
diff --git a/src/secp256k1/src/scalar_8x32_impl.h b/src/secp256k1/src/scalar_8x32_impl.h
index f8c7fa7efa..6853f79ecc 100644
--- a/src/secp256k1/src/scalar_8x32_impl.h
+++ b/src/secp256k1/src/scalar_8x32_impl.h
@@ -672,26 +672,24 @@ static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a)
secp256k1_scalar_reduce_512(r, l);
}
-#ifdef USE_ENDOMORPHISM
-static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
- r1->d[0] = a->d[0];
- r1->d[1] = a->d[1];
- r1->d[2] = a->d[2];
- r1->d[3] = a->d[3];
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
+ r1->d[0] = k->d[0];
+ r1->d[1] = k->d[1];
+ r1->d[2] = k->d[2];
+ r1->d[3] = k->d[3];
r1->d[4] = 0;
r1->d[5] = 0;
r1->d[6] = 0;
r1->d[7] = 0;
- r2->d[0] = a->d[4];
- r2->d[1] = a->d[5];
- r2->d[2] = a->d[6];
- r2->d[3] = a->d[7];
+ r2->d[0] = k->d[4];
+ r2->d[1] = k->d[5];
+ r2->d[2] = k->d[6];
+ r2->d[3] = k->d[7];
r2->d[4] = 0;
r2->d[5] = 0;
r2->d[6] = 0;
r2->d[7] = 0;
}
-#endif
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
diff --git a/src/secp256k1/src/scalar_impl.h b/src/secp256k1/src/scalar_impl.h
index 2ec04b1ae9..fc75891818 100644
--- a/src/secp256k1/src/scalar_impl.h
+++ b/src/secp256k1/src/scalar_impl.h
@@ -7,6 +7,10 @@
#ifndef SECP256K1_SCALAR_IMPL_H
#define SECP256K1_SCALAR_IMPL_H
+#ifdef VERIFY
+#include <string.h>
+#endif
+
#include "scalar.h"
#include "util.h"
@@ -252,37 +256,65 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
#endif
}
-#ifdef USE_ENDOMORPHISM
+/* These parameters are generated using sage/gen_exhaustive_groups.sage. */
#if defined(EXHAUSTIVE_TEST_ORDER)
+# if EXHAUSTIVE_TEST_ORDER == 13
+# define EXHAUSTIVE_TEST_LAMBDA 9
+# elif EXHAUSTIVE_TEST_ORDER == 199
+# define EXHAUSTIVE_TEST_LAMBDA 92
+# else
+# error No known lambda for the specified exhaustive test group order.
+# endif
+
/**
- * Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the
- * full case we don't bother making k1 and k2 be small, we just want them to be
+ * Find r1 and r2 given k, such that r1 + r2 * lambda == k mod n; unlike in the
+ * full case we don't bother making r1 and r2 be small, we just want them to be
* nontrivial to get full test coverage for the exhaustive tests. We therefore
- * (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda.
+ * (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n).
*/
-static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
- *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER;
- *r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
+ *r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER;
+ *r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
}
#else
/**
* The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where
- * lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a,
- * 0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72}
+ * lambda is: */
+static const secp256k1_scalar secp256k1_const_lambda = SECP256K1_SCALAR_CONST(
+ 0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL,
+ 0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL
+);
+
+#ifdef VERIFY
+static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k);
+#endif
+
+/*
+ * Both lambda and beta are primitive cube roots of unity. That is, lambda^3 == 1 mod n and
+ * beta^3 == 1 mod p, where n is the curve order and p is the field order.
*
- * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
- * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
- * and k2 have a small size.
- * It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are:
+ * Furthermore, because (X^3 - 1) = (X - 1)(X^2 + X + 1), the primitive cube roots of unity are
+ * roots of X^2 + X + 1. Therefore lambda^2 + lambda == -1 mod n and beta^2 + beta == -1 mod p.
+ * (The other primitive cube roots of unity are lambda^2 and beta^2 respectively.)
+ *
+ * Let l = -1/2 + i*sqrt(3)/2, the complex root of X^2 + X + 1. We can define a ring
+ * homomorphism phi : Z[l] -> Z_n where phi(a + b*l) == a + b*lambda mod n. The kernel of phi
+ * is a lattice over Z[l] (considering Z[l] as a Z-module). This lattice is generated by a
+ * reduced basis {a1 + b1*l, a2 + b2*l} where
*
* - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
* - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3}
* - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8}
* - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
*
- * The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives
+ * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
+ * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
+ * and k2 are small in absolute value.
+ *
+ * The algorithm computes c1 = round(b2 * k / n) and c2 = round((-b1) * k / n), and gives
* k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and
- * compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2.
+ * compute r2 = k2 mod n, and r1 = k1 mod n = (k - r2 * lambda) mod n, avoiding the need for
+ * the constants a1 and a2.
*
* g1, g2 are precomputed constants used to replace division with a rounded multiplication
* when decomposing the scalar for an endomorphism-based point multiplication.
@@ -294,21 +326,21 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar
* Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez),
* Section 4.3 (here we use a somewhat higher-precision estimate):
* d = a1*b2 - b1*a2
- * g1 = round((2^272)*b2/d)
- * g2 = round((2^272)*b1/d)
+ * g1 = round(2^384 * b2/d)
+ * g2 = round(2^384 * (-b1)/d)
*
- * (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found
- * as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda').
+ * (Note that d is also equal to the curve order, n, here because [a1,b1] and [a2,b2]
+ * can be found as outputs of the Extended Euclidean Algorithm on inputs n and lambda).
*
- * The function below splits a in r1 and r2, such that r1 + lambda * r2 == a (mod order).
+ * The function below splits k into r1 and r2, such that
+ * - r1 + lambda * r2 == k (mod n)
+ * - either r1 < 2^128 or -r1 mod n < 2^128
+ * - either r2 < 2^128 or -r2 mod n < 2^128
+ *
+ * See proof below.
*/
-
-static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
secp256k1_scalar c1, c2;
- static const secp256k1_scalar minus_lambda = SECP256K1_SCALAR_CONST(
- 0xAC9C52B3UL, 0x3FA3CF1FUL, 0x5AD9E3FDUL, 0x77ED9BA4UL,
- 0xA880B9FCUL, 0x8EC739C2UL, 0xE0CFC810UL, 0xB51283CFUL
- );
static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST(
0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL
@@ -318,25 +350,167 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar
0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL
);
static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST(
- 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00003086UL,
- 0xD221A7D4UL, 0x6BCDE86CUL, 0x90E49284UL, 0xEB153DABUL
+ 0x3086D221UL, 0xA7D46BCDUL, 0xE86C90E4UL, 0x9284EB15UL,
+ 0x3DAA8A14UL, 0x71E8CA7FUL, 0xE893209AUL, 0x45DBB031UL
);
static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST(
- 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x0000E443UL,
- 0x7ED6010EUL, 0x88286F54UL, 0x7FA90ABFUL, 0xE4C42212UL
+ 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL,
+ 0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL
);
- VERIFY_CHECK(r1 != a);
- VERIFY_CHECK(r2 != a);
+ VERIFY_CHECK(r1 != k);
+ VERIFY_CHECK(r2 != k);
/* these _var calls are constant time since the shift amount is constant */
- secp256k1_scalar_mul_shift_var(&c1, a, &g1, 272);
- secp256k1_scalar_mul_shift_var(&c2, a, &g2, 272);
+ secp256k1_scalar_mul_shift_var(&c1, k, &g1, 384);
+ secp256k1_scalar_mul_shift_var(&c2, k, &g2, 384);
secp256k1_scalar_mul(&c1, &c1, &minus_b1);
secp256k1_scalar_mul(&c2, &c2, &minus_b2);
secp256k1_scalar_add(r2, &c1, &c2);
- secp256k1_scalar_mul(r1, r2, &minus_lambda);
- secp256k1_scalar_add(r1, r1, a);
-}
-#endif
+ secp256k1_scalar_mul(r1, r2, &secp256k1_const_lambda);
+ secp256k1_scalar_negate(r1, r1);
+ secp256k1_scalar_add(r1, r1, k);
+
+#ifdef VERIFY
+ secp256k1_scalar_split_lambda_verify(r1, r2, k);
#endif
+}
+
+#ifdef VERIFY
+/*
+ * Proof for secp256k1_scalar_split_lambda's bounds.
+ *
+ * Let
+ * - epsilon1 = 2^256 * |g1/2^384 - b2/d|
+ * - epsilon2 = 2^256 * |g2/2^384 - (-b1)/d|
+ * - c1 = round(k*g1/2^384)
+ * - c2 = round(k*g2/2^384)
+ *
+ * Lemma 1: |c1 - k*b2/d| < 2^-1 + epsilon1
+ *
+ * |c1 - k*b2/d|
+ * =
+ * |c1 - k*g1/2^384 + k*g1/2^384 - k*b2/d|
+ * <= {triangle inequality}
+ * |c1 - k*g1/2^384| + |k*g1/2^384 - k*b2/d|
+ * =
+ * |c1 - k*g1/2^384| + k*|g1/2^384 - b2/d|
+ * < {rounding in c1 and 0 <= k < 2^256}
+ * 2^-1 + 2^256 * |g1/2^384 - b2/d|
+ * = {definition of epsilon1}
+ * 2^-1 + epsilon1
+ *
+ * Lemma 2: |c2 - k*(-b1)/d| < 2^-1 + epsilon2
+ *
+ * |c2 - k*(-b1)/d|
+ * =
+ * |c2 - k*g2/2^384 + k*g2/2^384 - k*(-b1)/d|
+ * <= {triangle inequality}
+ * |c2 - k*g2/2^384| + |k*g2/2^384 - k*(-b1)/d|
+ * =
+ * |c2 - k*g2/2^384| + k*|g2/2^384 - (-b1)/d|
+ * < {rounding in c2 and 0 <= k < 2^256}
+ * 2^-1 + 2^256 * |g2/2^384 - (-b1)/d|
+ * = {definition of epsilon2}
+ * 2^-1 + epsilon2
+ *
+ * Let
+ * - k1 = k - c1*a1 - c2*a2
+ * - k2 = - c1*b1 - c2*b2
+ *
+ * Lemma 3: |k1| < (a1 + a2 + 1)/2 < 2^128
+ *
+ * |k1|
+ * = {definition of k1}
+ * |k - c1*a1 - c2*a2|
+ * = {(a1*b2 - b1*a2)/n = 1}
+ * |k*(a1*b2 - b1*a2)/n - c1*a1 - c2*a2|
+ * =
+ * |a1*(k*b2/n - c1) + a2*(k*(-b1)/n - c2)|
+ * <= {triangle inequality}
+ * a1*|k*b2/n - c1| + a2*|k*(-b1)/n - c2|
+ * < {Lemma 1 and Lemma 2}
+ *    a1*(2^-1 + epsilon1) + a2*(2^-1 + epsilon2)
+ * < {rounding up to an integer}
+ * (a1 + a2 + 1)/2
+ * < {rounding up to a power of 2}
+ * 2^128
+ *
+ * Lemma 4: |k2| < (-b1 + b2)/2 + 1 < 2^128
+ *
+ * |k2|
+ * = {definition of k2}
+ *    |- c1*b1 - c2*b2|
+ * = {(b1*b2 - b1*b2)/n = 0}
+ * |k*(b1*b2 - b1*b2)/n - c1*b1 - c2*b2|
+ * =
+ * |b1*(k*b2/n - c1) + b2*(k*(-b1)/n - c2)|
+ * <= {triangle inequality}
+ * (-b1)*|k*b2/n - c1| + b2*|k*(-b1)/n - c2|
+ * < {Lemma 1 and Lemma 2}
+ *    (-b1)*(2^-1 + epsilon1) + b2*(2^-1 + epsilon2)
+ * < {rounding up to an integer}
+ * (-b1 + b2)/2 + 1
+ * < {rounding up to a power of 2}
+ * 2^128
+ *
+ * Let
+ * - r2 = k2 mod n
+ * - r1 = k - r2*lambda mod n.
+ *
+ * Notice that r1 is defined such that r1 + r2 * lambda == k (mod n).
+ *
+ * Lemma 5: r1 == k1 mod n.
+ *
+ * r1
+ * == {definition of r1 and r2}
+ * k - k2*lambda
+ * == {definition of k2}
+ * k - (- c1*b1 - c2*b2)*lambda
+ * ==
+ * k + c1*b1*lambda + c2*b2*lambda
+ * == {a1 + b1*lambda == 0 mod n and a2 + b2*lambda == 0 mod n}
+ * k - c1*a1 - c2*a2
+ * == {definition of k1}
+ * k1
+ *
+ * From Lemma 3, Lemma 4, Lemma 5 and the definition of r2, we can conclude that
+ *
+ * - either r1 < 2^128 or -r1 mod n < 2^128
+ * - either r2 < 2^128 or -r2 mod n < 2^128.
+ *
+ * Q.E.D.
+ */
+static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k) {
+ secp256k1_scalar s;
+ unsigned char buf1[32];
+ unsigned char buf2[32];
+
+ /* (a1 + a2 + 1)/2 is 0xa2a8918ca85bafe22016d0b917e4dd77 */
+ static const unsigned char k1_bound[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xa2, 0xa8, 0x91, 0x8c, 0xa8, 0x5b, 0xaf, 0xe2, 0x20, 0x16, 0xd0, 0xb9, 0x17, 0xe4, 0xdd, 0x77
+ };
+
+ /* (-b1 + b2)/2 + 1 is 0x8a65287bd47179fb2be08846cea267ed */
+ static const unsigned char k2_bound[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x8a, 0x65, 0x28, 0x7b, 0xd4, 0x71, 0x79, 0xfb, 0x2b, 0xe0, 0x88, 0x46, 0xce, 0xa2, 0x67, 0xed
+ };
+
+ secp256k1_scalar_mul(&s, &secp256k1_const_lambda, r2);
+ secp256k1_scalar_add(&s, &s, r1);
+ VERIFY_CHECK(secp256k1_scalar_eq(&s, k));
+
+ secp256k1_scalar_negate(&s, r1);
+ secp256k1_scalar_get_b32(buf1, r1);
+ secp256k1_scalar_get_b32(buf2, &s);
+ VERIFY_CHECK(secp256k1_memcmp_var(buf1, k1_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k1_bound, 32) < 0);
+
+ secp256k1_scalar_negate(&s, r2);
+ secp256k1_scalar_get_b32(buf1, r2);
+ secp256k1_scalar_get_b32(buf2, &s);
+ VERIFY_CHECK(secp256k1_memcmp_var(buf1, k2_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k2_bound, 32) < 0);
+}
+#endif /* VERIFY */
+#endif /* !defined(EXHAUSTIVE_TEST_ORDER) */
#endif /* SECP256K1_SCALAR_IMPL_H */
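In equation form, the updated split above computes (with n the curve order and round() rounding to the nearest integer):

    c1 = round(g1*k / 2^384)
    c2 = round(g2*k / 2^384)
    r2 = (c1*(-b1) + c2*(-b2)) mod n
    r1 = (k - lambda*r2) mod n

which gives r1 + lambda*r2 == k (mod n), and by Lemmas 3-5 both r1 and r2 (or their negations mod n) are below 2^128 -- the property the new secp256k1_scalar_split_lambda_verify checks at runtime in VERIFY builds.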
diff --git a/src/secp256k1/src/scalar_low_impl.h b/src/secp256k1/src/scalar_low_impl.h
index b79cf1ff6c..a615ec074b 100644
--- a/src/secp256k1/src/scalar_low_impl.h
+++ b/src/secp256k1/src/scalar_low_impl.h
@@ -48,14 +48,17 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
}
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
- const int base = 0x100 % EXHAUSTIVE_TEST_ORDER;
int i;
+ int over = 0;
*r = 0;
for (i = 0; i < 32; i++) {
- *r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER;
+ *r = (*r * 0x100) + b32[i];
+ if (*r >= EXHAUSTIVE_TEST_ORDER) {
+ over = 1;
+ *r %= EXHAUSTIVE_TEST_ORDER;
+ }
}
- /* just deny overflow, it basically always happens */
- if (overflow) *overflow = 0;
+ if (overflow) *overflow = over;
}
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
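Extracted into a standalone sketch (ORDER standing in for EXHAUSTIVE_TEST_ORDER), the new reduction reports overflow exactly when the 32-byte big-endian value is >= the group order: any running prefix that reaches the order implies the full value does too, and if no prefix ever does, the final value is the full (in-range) value.

#include <stdio.h>

#define ORDER 13u /* stand-in for EXHAUSTIVE_TEST_ORDER */

/* Fold 32 big-endian bytes into a low-order scalar in base 256, flagging
 * overflow whenever a running prefix reaches the group order. */
static unsigned scalar_set_b32(const unsigned char *b32, int *overflow) {
    unsigned r = 0;
    int i, over = 0;
    for (i = 0; i < 32; i++) {
        r = r * 0x100u + b32[i];
        if (r >= ORDER) {
            over = 1;
            r %= ORDER;
        }
    }
    if (overflow) *overflow = over;
    return r;
}

int main(void) {
    unsigned char b[32] = {0};
    unsigned r;
    int over;
    b[31] = 12;                   /* value 12 < 13: in range */
    r = scalar_set_b32(b, &over);
    printf("r=%u overflow=%d\n", r, over); /* r=12 overflow=0 */
    b[31] = 13;                   /* value 13 == order: reduces to 0 */
    r = scalar_set_b32(b, &over);
    printf("r=%u overflow=%d\n", r, over); /* r=0 overflow=1 */
    return 0;
}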
diff --git a/src/secp256k1/src/scratch_impl.h b/src/secp256k1/src/scratch_impl.h
index b205620224..f381e2e322 100644
--- a/src/secp256k1/src/scratch_impl.h
+++ b/src/secp256k1/src/scratch_impl.h
@@ -26,7 +26,7 @@ static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* err
static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback, secp256k1_scratch* scratch) {
if (scratch != NULL) {
VERIFY_CHECK(scratch->alloc_size == 0); /* all checkpoints should be applied */
- if (memcmp(scratch->magic, "scratch", 8) != 0) {
+ if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return;
}
@@ -36,7 +36,7 @@ static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback,
}
static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch) {
- if (memcmp(scratch->magic, "scratch", 8) != 0) {
+ if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return 0;
}
@@ -44,7 +44,7 @@ static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callb
}
static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t checkpoint) {
- if (memcmp(scratch->magic, "scratch", 8) != 0) {
+ if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return;
}
@@ -56,7 +56,7 @@ static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_c
}
static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch, size_t objects) {
- if (memcmp(scratch->magic, "scratch", 8) != 0) {
+ if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return 0;
}
@@ -81,7 +81,7 @@ static void *secp256k1_scratch_alloc(const secp256k1_callback* error_callback, s
}
size = rounded_size;
- if (memcmp(scratch->magic, "scratch", 8) != 0) {
+ if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return NULL;
}
diff --git a/src/secp256k1/src/secp256k1.c b/src/secp256k1/src/secp256k1.c
index eaafb3a21d..dae506d08c 100644
--- a/src/secp256k1/src/secp256k1.c
+++ b/src/secp256k1/src/secp256k1.c
@@ -284,6 +284,9 @@ int secp256k1_ec_pubkey_parse(const secp256k1_context* ctx, secp256k1_pubkey* pu
if (!secp256k1_eckey_pubkey_parse(&Q, input, inputlen)) {
return 0;
}
+ if (!secp256k1_ge_is_in_correct_subgroup(&Q)) {
+ return 0;
+ }
secp256k1_pubkey_save(pubkey, &Q);
secp256k1_ge_clear(&Q);
return 1;
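On the real curve the new subgroup check is trivially true, because secp256k1 has cofactor 1; it only does work in exhaustive-test builds, where the "group" is a small subgroup of a different curve. A sketch of the intended semantics follows; the exhaustive branch is an assumption about the helper's behavior, not its verbatim implementation:

static int ge_is_in_correct_subgroup_sketch(const secp256k1_ge *ge) {
#ifdef EXHAUSTIVE_TEST_ORDER
    /* Membership test: P is in the subgroup iff order * P == infinity.
     * A simple double-and-add over the small constant order suffices. */
    secp256k1_gej out;
    int i;
    secp256k1_gej_set_infinity(&out);
    for (i = 31; i >= 0; --i) {
        secp256k1_gej_double_var(&out, &out, NULL);
        if ((((uint32_t)EXHAUSTIVE_TEST_ORDER) >> i) & 1) {
            secp256k1_gej_add_ge_var(&out, &out, ge, NULL);
        }
    }
    return secp256k1_gej_is_infinity(&out);
#else
    (void)ge;
    /* secp256k1 has cofactor 1: every point on the curve qualifies. */
    return 1;
#endif
}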
diff --git a/src/secp256k1/src/selftest.h b/src/secp256k1/src/selftest.h
index 885983aa20..0e37510c1e 100644
--- a/src/secp256k1/src/selftest.h
+++ b/src/secp256k1/src/selftest.h
@@ -22,7 +22,7 @@ static int secp256k1_selftest_sha256(void) {
secp256k1_sha256_initialize(&hasher);
secp256k1_sha256_write(&hasher, (const unsigned char*)input63, 63);
secp256k1_sha256_finalize(&hasher, out);
- return memcmp(out, output32, 32) == 0;
+ return secp256k1_memcmp_var(out, output32, 32) == 0;
}
static int secp256k1_selftest(void) {
diff --git a/src/secp256k1/src/testrand.h b/src/secp256k1/src/testrand.h
index bcbe15a6f1..a76003d5b8 100644
--- a/src/secp256k1/src/testrand.h
+++ b/src/secp256k1/src/testrand.h
@@ -14,28 +14,34 @@
/* A non-cryptographic RNG used only for test infrastructure. */
/** Seed the pseudorandom number generator for testing. */
-SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16);
+SECP256K1_INLINE static void secp256k1_testrand_seed(const unsigned char *seed16);
/** Generate a pseudorandom number in the range [0..2**32-1]. */
-static uint32_t secp256k1_rand32(void);
+static uint32_t secp256k1_testrand32(void);
/** Generate a pseudorandom number in the range [0..2**bits-1]. Bits must be 1 or
* more. */
-static uint32_t secp256k1_rand_bits(int bits);
+static uint32_t secp256k1_testrand_bits(int bits);
/** Generate a pseudorandom number in the range [0..range-1]. */
-static uint32_t secp256k1_rand_int(uint32_t range);
+static uint32_t secp256k1_testrand_int(uint32_t range);
/** Generate a pseudorandom 32-byte array. */
-static void secp256k1_rand256(unsigned char *b32);
+static void secp256k1_testrand256(unsigned char *b32);
/** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */
-static void secp256k1_rand256_test(unsigned char *b32);
+static void secp256k1_testrand256_test(unsigned char *b32);
/** Generate pseudorandom bytes with long sequences of zero and one bits. */
-static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len);
+static void secp256k1_testrand_bytes_test(unsigned char *bytes, size_t len);
/** Flip a single random bit in a byte array */
-static void secp256k1_rand_flip(unsigned char *b, size_t len);
+static void secp256k1_testrand_flip(unsigned char *b, size_t len);
+
+/** Initialize the test RNG using (hex encoded) array up to 16 bytes, or randomly if hexseed is NULL. */
+static void secp256k1_testrand_init(const char* hexseed);
+
+/** Print final test information. */
+static void secp256k1_testrand_finish(void);
#endif /* SECP256K1_TESTRAND_H */
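Putting the renamed API together, a test binary now drives the RNG through an explicit lifecycle. A sketch follows; the argv position mirrors how tests.c passes its seed but is incidental here:

int main(int argc, char **argv) {
    unsigned char buf[32];
    /* Seed from a hex string if given, else from /dev/urandom (with an
     * insecure time-based fallback); prints "random seed = ...". */
    secp256k1_testrand_init(argc > 2 ? argv[2] : NULL);

    /* ... run tests, drawing all randomness via the testrand helpers ... */
    secp256k1_testrand256(buf);                          /* 32 pseudorandom bytes */
    buf[0] = (unsigned char)secp256k1_testrand_bits(8);  /* 8 random bits */

    /* Prints "random run = ...", letting a failing run be reproduced by
     * re-running with the printed seed. */
    secp256k1_testrand_finish();
    return 0;
}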
diff --git a/src/secp256k1/src/testrand_impl.h b/src/secp256k1/src/testrand_impl.h
index dfb658d9c6..3392566329 100644
--- a/src/secp256k1/src/testrand_impl.h
+++ b/src/secp256k1/src/testrand_impl.h
@@ -8,6 +8,7 @@
#define SECP256K1_TESTRAND_IMPL_H
#include <stdint.h>
+#include <stdio.h>
#include <string.h>
#include "testrand.h"
@@ -19,11 +20,11 @@ static int secp256k1_test_rng_precomputed_used = 8;
static uint64_t secp256k1_test_rng_integer;
static int secp256k1_test_rng_integer_bits_left = 0;
-SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16) {
+SECP256K1_INLINE static void secp256k1_testrand_seed(const unsigned char *seed16) {
secp256k1_rfc6979_hmac_sha256_initialize(&secp256k1_test_rng, seed16, 16);
}
-SECP256K1_INLINE static uint32_t secp256k1_rand32(void) {
+SECP256K1_INLINE static uint32_t secp256k1_testrand32(void) {
if (secp256k1_test_rng_precomputed_used == 8) {
secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, (unsigned char*)(&secp256k1_test_rng_precomputed[0]), sizeof(secp256k1_test_rng_precomputed));
secp256k1_test_rng_precomputed_used = 0;
@@ -31,10 +32,10 @@ SECP256K1_INLINE static uint32_t secp256k1_rand32(void) {
return secp256k1_test_rng_precomputed[secp256k1_test_rng_precomputed_used++];
}
-static uint32_t secp256k1_rand_bits(int bits) {
+static uint32_t secp256k1_testrand_bits(int bits) {
uint32_t ret;
if (secp256k1_test_rng_integer_bits_left < bits) {
- secp256k1_test_rng_integer |= (((uint64_t)secp256k1_rand32()) << secp256k1_test_rng_integer_bits_left);
+ secp256k1_test_rng_integer |= (((uint64_t)secp256k1_testrand32()) << secp256k1_test_rng_integer_bits_left);
secp256k1_test_rng_integer_bits_left += 32;
}
ret = secp256k1_test_rng_integer;
@@ -44,7 +45,7 @@ static uint32_t secp256k1_rand_bits(int bits) {
return ret;
}
-static uint32_t secp256k1_rand_int(uint32_t range) {
+static uint32_t secp256k1_testrand_int(uint32_t range) {
/* We want a uniform integer between 0 and range-1, inclusive.
* B is the smallest number such that range <= 2**B.
* two mechanisms implemented here:
@@ -76,25 +77,25 @@ static uint32_t secp256k1_rand_int(uint32_t range) {
mult = 1;
}
while(1) {
- uint32_t x = secp256k1_rand_bits(bits);
+ uint32_t x = secp256k1_testrand_bits(bits);
if (x < trange) {
return (mult == 1) ? x : (x % range);
}
}
}
-static void secp256k1_rand256(unsigned char *b32) {
+static void secp256k1_testrand256(unsigned char *b32) {
secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, b32, 32);
}
-static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len) {
+static void secp256k1_testrand_bytes_test(unsigned char *bytes, size_t len) {
size_t bits = 0;
memset(bytes, 0, len);
while (bits < len * 8) {
int now;
uint32_t val;
- now = 1 + (secp256k1_rand_bits(6) * secp256k1_rand_bits(5) + 16) / 31;
- val = secp256k1_rand_bits(1);
+ now = 1 + (secp256k1_testrand_bits(6) * secp256k1_testrand_bits(5) + 16) / 31;
+ val = secp256k1_testrand_bits(1);
while (now > 0 && bits < len * 8) {
bytes[bits / 8] |= val << (bits % 8);
now--;
@@ -103,12 +104,55 @@ static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len) {
}
}
-static void secp256k1_rand256_test(unsigned char *b32) {
- secp256k1_rand_bytes_test(b32, 32);
+static void secp256k1_testrand256_test(unsigned char *b32) {
+ secp256k1_testrand_bytes_test(b32, 32);
}
-static void secp256k1_rand_flip(unsigned char *b, size_t len) {
- b[secp256k1_rand_int(len)] ^= (1 << secp256k1_rand_int(8));
+static void secp256k1_testrand_flip(unsigned char *b, size_t len) {
+ b[secp256k1_testrand_int(len)] ^= (1 << secp256k1_testrand_int(8));
+}
+
+static void secp256k1_testrand_init(const char* hexseed) {
+ unsigned char seed16[16] = {0};
+ if (hexseed && strlen(hexseed) != 0) {
+ int pos = 0;
+ while (pos < 16 && hexseed[0] != 0 && hexseed[1] != 0) {
+ unsigned short sh;
+ if ((sscanf(hexseed, "%2hx", &sh)) == 1) {
+ seed16[pos] = sh;
+ } else {
+ break;
+ }
+ hexseed += 2;
+ pos++;
+ }
+ } else {
+ FILE *frand = fopen("/dev/urandom", "r");
+ if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) {
+ uint64_t t = time(NULL) * (uint64_t)1337;
+ fprintf(stderr, "WARNING: could not read 16 bytes from /dev/urandom; falling back to insecure PRNG\n");
+ seed16[0] ^= t;
+ seed16[1] ^= t >> 8;
+ seed16[2] ^= t >> 16;
+ seed16[3] ^= t >> 24;
+ seed16[4] ^= t >> 32;
+ seed16[5] ^= t >> 40;
+ seed16[6] ^= t >> 48;
+ seed16[7] ^= t >> 56;
+ }
+ if (frand) {
+ fclose(frand);
+ }
+ }
+
+ printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]);
+ secp256k1_testrand_seed(seed16);
+}
+
+static void secp256k1_testrand_finish(void) {
+ unsigned char run32[32];
+ secp256k1_testrand256(run32);
+ printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]);
}
#endif /* SECP256K1_TESTRAND_IMPL_H */
diff --git a/src/secp256k1/src/tests.c b/src/secp256k1/src/tests.c
index 4780e9319b..bb4b5b4c07 100644
--- a/src/secp256k1/src/tests.c
+++ b/src/secp256k1/src/tests.c
@@ -54,7 +54,7 @@ static void uncounting_illegal_callback_fn(const char* str, void* data) {
void random_field_element_test(secp256k1_fe *fe) {
do {
unsigned char b32[32];
- secp256k1_rand256_test(b32);
+ secp256k1_testrand256_test(b32);
if (secp256k1_fe_set_b32(fe, b32)) {
break;
}
@@ -63,7 +63,7 @@ void random_field_element_test(secp256k1_fe *fe) {
void random_field_element_magnitude(secp256k1_fe *fe) {
secp256k1_fe zero;
- int n = secp256k1_rand_int(9);
+ int n = secp256k1_testrand_int(9);
secp256k1_fe_normalize(fe);
if (n == 0) {
return;
@@ -81,11 +81,12 @@ void random_group_element_test(secp256k1_ge *ge) {
secp256k1_fe fe;
do {
random_field_element_test(&fe);
- if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_rand_bits(1))) {
+ if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_testrand_bits(1))) {
secp256k1_fe_normalize(&ge->y);
break;
}
} while(1);
+ ge->infinity = 0;
}
void random_group_element_jacobian_test(secp256k1_gej *gej, const secp256k1_ge *ge) {
@@ -107,7 +108,7 @@ void random_scalar_order_test(secp256k1_scalar *num) {
do {
unsigned char b32[32];
int overflow = 0;
- secp256k1_rand256_test(b32);
+ secp256k1_testrand256_test(b32);
secp256k1_scalar_set_b32(num, b32, &overflow);
if (overflow || secp256k1_scalar_is_zero(num)) {
continue;
@@ -120,7 +121,7 @@ void random_scalar_order(secp256k1_scalar *num) {
do {
unsigned char b32[32];
int overflow = 0;
- secp256k1_rand256(b32);
+ secp256k1_testrand256(b32);
secp256k1_scalar_set_b32(num, b32, &overflow);
if (overflow || secp256k1_scalar_is_zero(num)) {
continue;
@@ -441,14 +442,14 @@ void run_sha256_tests(void) {
secp256k1_sha256_initialize(&hasher);
secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i]));
secp256k1_sha256_finalize(&hasher, out);
- CHECK(memcmp(out, outputs[i], 32) == 0);
+ CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0);
if (strlen(inputs[i]) > 0) {
- int split = secp256k1_rand_int(strlen(inputs[i]));
+ int split = secp256k1_testrand_int(strlen(inputs[i]));
secp256k1_sha256_initialize(&hasher);
secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split);
secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split);
secp256k1_sha256_finalize(&hasher, out);
- CHECK(memcmp(out, outputs[i], 32) == 0);
+ CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0);
}
}
}
@@ -485,14 +486,14 @@ void run_hmac_sha256_tests(void) {
secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i]));
secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i]));
secp256k1_hmac_sha256_finalize(&hasher, out);
- CHECK(memcmp(out, outputs[i], 32) == 0);
+ CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0);
if (strlen(inputs[i]) > 0) {
- int split = secp256k1_rand_int(strlen(inputs[i]));
+ int split = secp256k1_testrand_int(strlen(inputs[i]));
secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i]));
secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split);
secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split);
secp256k1_hmac_sha256_finalize(&hasher, out);
- CHECK(memcmp(out, outputs[i], 32) == 0);
+ CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0);
}
}
}
@@ -519,21 +520,21 @@ void run_rfc6979_hmac_sha256_tests(void) {
secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 64);
for (i = 0; i < 3; i++) {
secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
- CHECK(memcmp(out, out1[i], 32) == 0);
+ CHECK(secp256k1_memcmp_var(out, out1[i], 32) == 0);
}
secp256k1_rfc6979_hmac_sha256_finalize(&rng);
secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 65);
for (i = 0; i < 3; i++) {
secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
- CHECK(memcmp(out, out1[i], 32) != 0);
+ CHECK(secp256k1_memcmp_var(out, out1[i], 32) != 0);
}
secp256k1_rfc6979_hmac_sha256_finalize(&rng);
secp256k1_rfc6979_hmac_sha256_initialize(&rng, key2, 64);
for (i = 0; i < 3; i++) {
secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
- CHECK(memcmp(out, out2[i], 32) == 0);
+ CHECK(secp256k1_memcmp_var(out, out2[i], 32) == 0);
}
secp256k1_rfc6979_hmac_sha256_finalize(&rng);
}
@@ -557,7 +558,7 @@ void test_rand_bits(int rand32, int bits) {
/* Multiply the output of all rand calls with the odd number m, which
should not change the uniformity of its distribution. */
for (i = 0; i < rounds[usebits]; i++) {
- uint32_t r = (rand32 ? secp256k1_rand32() : secp256k1_rand_bits(bits));
+ uint32_t r = (rand32 ? secp256k1_testrand32() : secp256k1_testrand_bits(bits));
CHECK((((uint64_t)r) >> bits) == 0);
for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) {
uint32_t rm = r * mults[m];
@@ -582,7 +583,7 @@ void test_rand_int(uint32_t range, uint32_t subrange) {
uint64_t x = 0;
CHECK((range % subrange) == 0);
for (i = 0; i < rounds; i++) {
- uint32_t r = secp256k1_rand_int(range);
+ uint32_t r = secp256k1_testrand_int(range);
CHECK(r < range);
r = r % subrange;
x |= (((uint64_t)1) << r);
@@ -614,7 +615,7 @@ void run_rand_int(void) {
#ifndef USE_NUM_NONE
void random_num_negate(secp256k1_num *num) {
- if (secp256k1_rand_bits(1)) {
+ if (secp256k1_testrand_bits(1)) {
secp256k1_num_negate(num);
}
}
@@ -658,11 +659,11 @@ void test_num_add_sub(void) {
secp256k1_num n2;
secp256k1_num n1p2, n2p1, n1m2, n2m1;
random_num_order_test(&n1); /* n1 = R1 */
- if (secp256k1_rand_bits(1)) {
+ if (secp256k1_testrand_bits(1)) {
random_num_negate(&n1);
}
random_num_order_test(&n2); /* n2 = R2 */
- if (secp256k1_rand_bits(1)) {
+ if (secp256k1_testrand_bits(1)) {
random_num_negate(&n2);
}
secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = R1 + R2 */
@@ -853,7 +854,7 @@ void scalar_test(void) {
while (i < 256) {
secp256k1_scalar t;
int j;
- int now = secp256k1_rand_int(15) + 1;
+ int now = secp256k1_testrand_int(15) + 1;
if (now + i > 256) {
now = 256 - i;
}
@@ -930,7 +931,7 @@ void scalar_test(void) {
secp256k1_num rnum;
secp256k1_num rnum2;
unsigned char cone[1] = {0x01};
- unsigned int shift = 256 + secp256k1_rand_int(257);
+ unsigned int shift = 256 + secp256k1_testrand_int(257);
secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift);
secp256k1_num_mul(&rnum, &s1num, &s2num);
secp256k1_num_shift(&rnum, shift - 1);
@@ -948,7 +949,7 @@ void scalar_test(void) {
random_scalar_order_test(&r);
for (i = 0; i < 100; ++i) {
int low;
- int shift = 1 + secp256k1_rand_int(15);
+ int shift = 1 + secp256k1_testrand_int(15);
int expected = r.d[0] % (1 << shift);
low = secp256k1_scalar_shr_int(&r, shift);
CHECK(expected == low);
@@ -996,7 +997,7 @@ void scalar_test(void) {
secp256k1_scalar b;
int i;
/* Test add_bit. */
- int bit = secp256k1_rand_bits(8);
+ int bit = secp256k1_testrand_bits(8);
secp256k1_scalar_set_int(&b, 1);
CHECK(secp256k1_scalar_is_one(&b));
for (i = 0; i < bit; i++) {
@@ -1157,7 +1158,7 @@ void run_scalar_tests(void) {
secp256k1_scalar_set_b32(&scalar, bin, &overflow);
CHECK(overflow == 0);
secp256k1_scalar_get_b32(bin_tmp, &scalar);
- CHECK(memcmp(bin, bin_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(bin, bin_tmp, 32) == 0);
/* A scalar set to all 1s should overflow. */
memset(bin, 0xFF, 32);
@@ -1767,7 +1768,7 @@ void run_scalar_tests(void) {
void random_fe(secp256k1_fe *x) {
unsigned char bin[32];
do {
- secp256k1_rand256(bin);
+ secp256k1_testrand256(bin);
if (secp256k1_fe_set_b32(x, bin)) {
return;
}
@@ -1777,7 +1778,7 @@ void random_fe(secp256k1_fe *x) {
void random_fe_test(secp256k1_fe *x) {
unsigned char bin[32];
do {
- secp256k1_rand256_test(bin);
+ secp256k1_testrand256_test(bin);
if (secp256k1_fe_set_b32(x, bin)) {
return;
}
@@ -1845,18 +1846,18 @@ void run_field_convert(void) {
CHECK(secp256k1_fe_equal_var(&fe, &fe2));
/* Check conversion from fe. */
secp256k1_fe_get_b32(b322, &fe);
- CHECK(memcmp(b322, b32, 32) == 0);
+ CHECK(secp256k1_memcmp_var(b322, b32, 32) == 0);
secp256k1_fe_to_storage(&fes2, &fe);
- CHECK(memcmp(&fes2, &fes, sizeof(fes)) == 0);
+ CHECK(secp256k1_memcmp_var(&fes2, &fes, sizeof(fes)) == 0);
}
-int fe_memcmp(const secp256k1_fe *a, const secp256k1_fe *b) {
+int fe_secp256k1_memcmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
secp256k1_fe t = *b;
#ifdef VERIFY
t.magnitude = a->magnitude;
t.normalized = a->normalized;
#endif
- return memcmp(a, &t, sizeof(secp256k1_fe));
+ return secp256k1_memcmp_var(a, &t, sizeof(secp256k1_fe));
}
void run_field_misc(void) {
@@ -1882,13 +1883,13 @@ void run_field_misc(void) {
CHECK(x.normalized && x.magnitude == 1);
#endif
secp256k1_fe_cmov(&x, &x, 1);
- CHECK(fe_memcmp(&x, &z) != 0);
- CHECK(fe_memcmp(&x, &q) == 0);
+ CHECK(fe_secp256k1_memcmp_var(&x, &z) != 0);
+ CHECK(fe_secp256k1_memcmp_var(&x, &q) == 0);
secp256k1_fe_cmov(&q, &z, 1);
#ifdef VERIFY
CHECK(!q.normalized && q.magnitude == z.magnitude);
#endif
- CHECK(fe_memcmp(&q, &z) == 0);
+ CHECK(fe_secp256k1_memcmp_var(&q, &z) == 0);
secp256k1_fe_normalize_var(&x);
secp256k1_fe_normalize_var(&z);
CHECK(!secp256k1_fe_equal_var(&x, &z));
@@ -1912,9 +1913,9 @@ void run_field_misc(void) {
secp256k1_fe_to_storage(&zs, &z);
secp256k1_fe_storage_cmov(&zs, &xs, 0);
secp256k1_fe_storage_cmov(&zs, &zs, 1);
- CHECK(memcmp(&xs, &zs, sizeof(xs)) != 0);
+ CHECK(secp256k1_memcmp_var(&xs, &zs, sizeof(xs)) != 0);
secp256k1_fe_storage_cmov(&ys, &xs, 1);
- CHECK(memcmp(&xs, &ys, sizeof(xs)) == 0);
+ CHECK(secp256k1_memcmp_var(&xs, &ys, sizeof(xs)) == 0);
secp256k1_fe_from_storage(&x, &xs);
secp256k1_fe_from_storage(&y, &ys);
secp256k1_fe_from_storage(&z, &zs);
@@ -1970,7 +1971,7 @@ void run_field_inv_all_var(void) {
secp256k1_fe_inv_all_var(xi, x, 0);
for (i = 0; i < count; i++) {
size_t j;
- size_t len = secp256k1_rand_int(15) + 1;
+ size_t len = secp256k1_testrand_int(15) + 1;
for (j = 0; j < len; j++) {
random_fe_non_zero(&x[j]);
}
@@ -2101,17 +2102,12 @@ void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
void test_ge(void) {
int i, i1;
-#ifdef USE_ENDOMORPHISM
int runs = 6;
-#else
- int runs = 4;
-#endif
- /* Points: (infinity, p1, p1, -p1, -p1, p2, p2, -p2, -p2, p3, p3, -p3, -p3, p4, p4, -p4, -p4).
- * The second in each pair of identical points uses a random Z coordinate in the Jacobian form.
- * All magnitudes are randomized.
- * All 17*17 combinations of points are added to each other, using all applicable methods.
- *
- * When the endomorphism code is compiled in, p5 = lambda*p1 and p6 = lambda^2*p1 are added as well.
+ /* 25 points are used:
+ * - infinity
+ * - for each of four random points p1 p2 p3 p4, we add the point, its
+ * negation, and then those two again but with randomized Z coordinate.
+ * - The same is then done for lambda*p1 and lambda^2*p1.
*/
secp256k1_ge *ge = (secp256k1_ge *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_ge) * (1 + 4 * runs));
secp256k1_gej *gej = (secp256k1_gej *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_gej) * (1 + 4 * runs));
@@ -2126,14 +2122,12 @@ void test_ge(void) {
int j;
secp256k1_ge g;
random_group_element_test(&g);
-#ifdef USE_ENDOMORPHISM
if (i >= runs - 2) {
secp256k1_ge_mul_lambda(&g, &ge[1]);
}
if (i >= runs - 1) {
secp256k1_ge_mul_lambda(&g, &g);
}
-#endif
ge[1 + 4 * i] = g;
ge[2 + 4 * i] = g;
secp256k1_ge_neg(&ge[3 + 4 * i], &g);
@@ -2262,7 +2256,7 @@ void test_ge(void) {
gej_shuffled[i] = gej[i];
}
for (i = 0; i < 4 * runs + 1; i++) {
- int swap = i + secp256k1_rand_int(4 * runs + 1 - i);
+ int swap = i + secp256k1_testrand_int(4 * runs + 1 - i);
if (swap != i) {
secp256k1_gej t = gej_shuffled[i];
gej_shuffled[i] = gej_shuffled[swap];
@@ -2448,7 +2442,7 @@ void test_ec_combine(void) {
secp256k1_ge_set_gej(&Q, &Qj);
secp256k1_pubkey_save(&sd, &Q);
CHECK(secp256k1_ec_pubkey_combine(ctx, &sd2, d, i) == 1);
- CHECK(memcmp(&sd, &sd2, sizeof(sd)) == 0);
+ CHECK(secp256k1_memcmp_var(&sd, &sd2, sizeof(sd)) == 0);
}
}
@@ -2614,7 +2608,6 @@ void test_point_times_order(const secp256k1_gej *point) {
secp256k1_ecmult(&ctx->ecmult_ctx, &res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */
secp256k1_gej_add_var(&res1, &res1, &res2, NULL);
CHECK(secp256k1_gej_is_infinity(&res1));
- CHECK(secp256k1_gej_is_valid_var(&res1) == 0);
secp256k1_ge_set_gej(&res3, &res1);
CHECK(secp256k1_ge_is_infinity(&res3));
CHECK(secp256k1_ge_is_valid_var(&res3) == 0);
@@ -2633,6 +2626,87 @@ void test_point_times_order(const secp256k1_gej *point) {
ge_equals_ge(&res3, &secp256k1_ge_const_g);
}
+/* These scalars reach large (in absolute value) outputs when fed to secp256k1_scalar_split_lambda.
+ *
+ * They are computed as:
+ * - For a in [-2, -1, 0, 1, 2]:
+ * - For b in [-3, -1, 1, 3]:
+ * - Output (a*LAMBDA + (ORDER+b)/2) % ORDER
+ */
+static const secp256k1_scalar scalars_near_split_bounds[20] = {
+ SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0xf3aa0585, 0xc5ea2367, 0xe1b660db, 0x0209e6fc),
+ SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0xf3aa0585, 0xc5ea2367, 0xe1b660db, 0x0209e6fd),
+ SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0xf3aa0585, 0xc5ea2367, 0xe1b660db, 0x0209e6fe),
+ SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0xf3aa0585, 0xc5ea2367, 0xe1b660db, 0x0209e6ff),
+ SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0x4b294b89, 0x3722e9a5, 0x00e698ca, 0x4cf7632d),
+ SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0x4b294b89, 0x3722e9a5, 0x00e698ca, 0x4cf7632e),
+ SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0x4b294b89, 0x3722e9a5, 0x00e698ca, 0x4cf7632f),
+ SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0x4b294b89, 0x3722e9a5, 0x00e698ca, 0x4cf76330),
+ SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x5d576e73, 0x57a4501d, 0xdfe92f46, 0x681b209f),
+ SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x5d576e73, 0x57a4501d, 0xdfe92f46, 0x681b20a0),
+ SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x5d576e73, 0x57a4501d, 0xdfe92f46, 0x681b20a1),
+ SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x5d576e73, 0x57a4501d, 0xdfe92f46, 0x681b20a2),
+ SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0x6f85915d, 0x7825b696, 0xbeebc5c2, 0x833ede11),
+ SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0x6f85915d, 0x7825b696, 0xbeebc5c2, 0x833ede12),
+ SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0x6f85915d, 0x7825b696, 0xbeebc5c2, 0x833ede13),
+ SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0x6f85915d, 0x7825b696, 0xbeebc5c2, 0x833ede14),
+ SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0xc704d760, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a42),
+ SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0xc704d760, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a43),
+ SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0xc704d760, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a44),
+ SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0xc704d760, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a45)
+};
+
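These constants can be re-derived with the test suite's own scalar helpers, since secp256k1_scalar arithmetic reduces modulo the group order automatically. A minimal sketch, assuming the internal secp256k1_const_lambda and the scalar functions already used elsewhere in tests.c (sketch_near_split_bound is an invented name, not part of the library):

    static void sketch_near_split_bound(secp256k1_scalar *out, int a, int b) {
        /* Computes (a*LAMBDA + (ORDER+b)/2) % ORDER for small a and odd b. */
        secp256k1_scalar step;
        int i;
        /* (ORDER+1)/2 is the inverse of 2 mod ORDER, and for odd b,
         * (ORDER+b)/2 = (ORDER+1)/2 + (b-1)/2. */
        secp256k1_scalar_set_int(out, 2);
        secp256k1_scalar_inverse_var(out, out);
        secp256k1_scalar_set_int(&step, b >= 1 ? (b - 1) / 2 : (1 - b) / 2);
        if (b < 1) secp256k1_scalar_negate(&step, &step);
        secp256k1_scalar_add(out, out, &step);
        /* Add a*LAMBDA via |a| additions of +/-lambda; a ranges over [-2, 2]. */
        step = secp256k1_const_lambda;
        if (a < 0) secp256k1_scalar_negate(&step, &step);
        for (i = 0; i < (a < 0 ? -a : a); i++) {
            secp256k1_scalar_add(out, out, &step);
        }
    }

For a = 0, b = 1 this yields (ORDER+1)/2, the third entry of the middle block above.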
+void test_ecmult_target(const secp256k1_scalar* target, int mode) {
+ /* Mode: 0=ecmult_gen, 1=ecmult, 2=ecmult_const */
+ secp256k1_scalar n1, n2;
+ secp256k1_ge p;
+ secp256k1_gej pj, p1j, p2j, ptj;
+ static const secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+
+ /* Generate random n1,n2 such that n1+n2 = -target. */
+ random_scalar_order_test(&n1);
+ secp256k1_scalar_add(&n2, &n1, target);
+ secp256k1_scalar_negate(&n2, &n2);
+
+ /* Generate a random input point. */
+ if (mode != 0) {
+ random_group_element_test(&p);
+ secp256k1_gej_set_ge(&pj, &p);
+ }
+
+ /* EC multiplications */
+ if (mode == 0) {
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &p1j, &n1);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &p2j, &n2);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &ptj, target);
+ } else if (mode == 1) {
+ secp256k1_ecmult(&ctx->ecmult_ctx, &p1j, &pj, &n1, &zero);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &p2j, &pj, &n2, &zero);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &ptj, &pj, target, &zero);
+ } else {
+ secp256k1_ecmult_const(&p1j, &p, &n1, 256);
+ secp256k1_ecmult_const(&p2j, &p, &n2, 256);
+ secp256k1_ecmult_const(&ptj, &p, target, 256);
+ }
+
+ /* Add them all up: n1*P + n2*P + target*P = (n1+n2+target)*P = (n1+n2-n1-n2)*P = 0. */
+ secp256k1_gej_add_var(&ptj, &ptj, &p1j, NULL);
+ secp256k1_gej_add_var(&ptj, &ptj, &p2j, NULL);
+ CHECK(secp256k1_gej_is_infinity(&ptj));
+}
+
+void run_ecmult_near_split_bound(void) {
+ int i;
+ unsigned j;
+ for (i = 0; i < 4*count; ++i) {
+ for (j = 0; j < sizeof(scalars_near_split_bounds) / sizeof(scalars_near_split_bounds[0]); ++j) {
+ test_ecmult_target(&scalars_near_split_bounds[j], 0);
+ test_ecmult_target(&scalars_near_split_bounds[j], 1);
+ test_ecmult_target(&scalars_near_split_bounds[j], 2);
+ }
+ }
+}
+
void run_point_times_order(void) {
int i;
secp256k1_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2);
@@ -2646,7 +2720,6 @@ void run_point_times_order(void) {
secp256k1_gej j;
CHECK(secp256k1_ge_is_valid_var(&p));
secp256k1_gej_set_ge(&j, &p);
- CHECK(secp256k1_gej_is_valid_var(&j));
test_point_times_order(&j);
}
secp256k1_fe_sqr(&x, &x);
@@ -3042,12 +3115,10 @@ void test_secp256k1_pippenger_bucket_window_inv(void) {
CHECK(secp256k1_pippenger_bucket_window_inv(0) == 0);
for(i = 1; i <= PIPPENGER_MAX_BUCKET_WINDOW; i++) {
-#ifdef USE_ENDOMORPHISM
/* Bucket_window of 8 is not used with endo */
if (i == 8) {
continue;
}
-#endif
CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)) == i);
if (i != PIPPENGER_MAX_BUCKET_WINDOW) {
CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)+1) > i);
@@ -3060,7 +3131,7 @@ void test_secp256k1_pippenger_bucket_window_inv(void) {
* for a given scratch space.
*/
void test_ecmult_multi_pippenger_max_points(void) {
- size_t scratch_size = secp256k1_rand_int(256);
+ size_t scratch_size = secp256k1_testrand_int(256);
size_t max_size = secp256k1_pippenger_scratch_size(secp256k1_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12);
secp256k1_scratch *scratch;
size_t n_points_supported;
@@ -3290,13 +3361,10 @@ void test_constant_wnaf(const secp256k1_scalar *number, int w) {
secp256k1_scalar_set_int(&x, 0);
secp256k1_scalar_set_int(&shift, 1 << w);
- /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */
-#ifdef USE_ENDOMORPHISM
for (i = 0; i < 16; ++i) {
secp256k1_scalar_shr_int(&num, 8);
}
bits = 128;
-#endif
skew = secp256k1_wnaf_const(wnaf, &num, w, bits);
for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) {
@@ -3331,12 +3399,9 @@ void test_fixed_wnaf(const secp256k1_scalar *number, int w) {
secp256k1_scalar_set_int(&x, 0);
secp256k1_scalar_set_int(&shift, 1 << w);
- /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */
-#ifdef USE_ENDOMORPHISM
for (i = 0; i < 16; ++i) {
secp256k1_scalar_shr_int(&num, 8);
}
-#endif
skew = secp256k1_wnaf_fixed(wnaf, &num, w);
for (i = WNAF_SIZE(w)-1; i >= 0; --i) {
@@ -3520,7 +3585,7 @@ void test_ecmult_gen_blind(void) {
secp256k1_ge pge;
random_scalar_order_test(&key);
secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key);
- secp256k1_rand256(seed32);
+ secp256k1_testrand256(seed32);
b = ctx->ecmult_gen_ctx.blind;
i = ctx->ecmult_gen_ctx.initial;
secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32);
@@ -3552,16 +3617,18 @@ void run_ecmult_gen_blind(void) {
}
}
-#ifdef USE_ENDOMORPHISM
/***** ENDOMORPHISM TESTS *****/
-void test_scalar_split(void) {
- secp256k1_scalar full;
- secp256k1_scalar s1, slam;
+void test_scalar_split(const secp256k1_scalar* full) {
+ secp256k1_scalar s, s1, slam;
const unsigned char zero[32] = {0};
unsigned char tmp[32];
- random_scalar_order_test(&full);
- secp256k1_scalar_split_lambda(&s1, &slam, &full);
+ secp256k1_scalar_split_lambda(&s1, &slam, full);
+
+ /* check slam*lambda + s1 == full */
+ secp256k1_scalar_mul(&s, &secp256k1_const_lambda, &slam);
+ secp256k1_scalar_add(&s, &s, &s1);
+ CHECK(secp256k1_scalar_eq(&s, full));
/* check that both are <= 128 bits in size */
if (secp256k1_scalar_is_high(&s1)) {
@@ -3572,15 +3639,32 @@ void test_scalar_split(void) {
}
secp256k1_scalar_get_b32(tmp, &s1);
- CHECK(memcmp(zero, tmp, 16) == 0);
+ CHECK(secp256k1_memcmp_var(zero, tmp, 16) == 0);
secp256k1_scalar_get_b32(tmp, &slam);
- CHECK(memcmp(zero, tmp, 16) == 0);
+ CHECK(secp256k1_memcmp_var(zero, tmp, 16) == 0);
}
+
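Spelled out, the invariant test_scalar_split now checks for every input k is the lambda decomposition the endomorphism code relies on:

    k == s1 + slam * lambda  (mod n),

with both s1 and slam fitting in 128 bits once any "high" half is negated, which is what the secp256k1_scalar_is_high branches and the 16-zero-byte prefix checks above enforce.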
void run_endomorphism_tests(void) {
- test_scalar_split();
+ unsigned i;
+ static secp256k1_scalar s;
+ test_scalar_split(&secp256k1_scalar_zero);
+ test_scalar_split(&secp256k1_scalar_one);
+ secp256k1_scalar_negate(&s, &secp256k1_scalar_one);
+ test_scalar_split(&s);
+ test_scalar_split(&secp256k1_const_lambda);
+ secp256k1_scalar_add(&s, &secp256k1_const_lambda, &secp256k1_scalar_one);
+ test_scalar_split(&s);
+
+ for (i = 0; i < 100U * count; ++i) {
+ secp256k1_scalar full;
+ random_scalar_order_test(&full);
+ test_scalar_split(&full);
+ }
+ for (i = 0; i < sizeof(scalars_near_split_bounds) / sizeof(scalars_near_split_bounds[0]); ++i) {
+ test_scalar_split(&scalars_near_split_bounds[i]);
+ }
}
-#endif
void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvalid) {
unsigned char pubkeyc[65];
@@ -3622,7 +3706,7 @@ void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvali
CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
VG_CHECK(pubkeyo, outl);
CHECK(outl == 33);
- CHECK(memcmp(&pubkeyo[1], &pubkeyc[1], 32) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkeyo[1], &pubkeyc[1], 32) == 0);
CHECK((pubkeyclen != 33) || (pubkeyo[0] == pubkeyc[0]));
if (ypass) {
/* This test isn't always done because we decode with alternative signs, so the y won't match. */
@@ -3638,7 +3722,7 @@ void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvali
VG_CHECK(pubkeyo, outl);
CHECK(outl == 65);
CHECK(pubkeyo[0] == 4);
- CHECK(memcmp(&pubkeyo[1], input, 64) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkeyo[1], input, 64) == 0);
}
CHECK(ecount == 0);
} else {
@@ -4007,7 +4091,7 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, orderc) == 0);
VG_CHECK(&pubkey, sizeof(pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
/* Maximum value is too large, reject. */
memset(ctmp, 255, 32);
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0);
@@ -4015,7 +4099,7 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0);
VG_CHECK(&pubkey, sizeof(pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
/* Zero is too small, reject. */
memset(ctmp, 0, 32);
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0);
@@ -4023,7 +4107,7 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0);
VG_CHECK(&pubkey, sizeof(pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
/* One must be accepted. */
ctmp[31] = 0x01;
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1);
@@ -4031,7 +4115,7 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1);
VG_CHECK(&pubkey, sizeof(pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
pubkey_one = pubkey;
/* Group order + 1 is too large, reject. */
memcpy(ctmp, orderc, 32);
@@ -4041,7 +4125,7 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0);
VG_CHECK(&pubkey, sizeof(pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
/* -1 must be accepted. */
ctmp[31] = 0x40;
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1);
@@ -4049,20 +4133,20 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1);
VG_CHECK(&pubkey, sizeof(pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
pubkey_negone = pubkey;
/* Tweak of zero leaves the value unchanged. */
memset(ctmp2, 0, 32);
CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 1);
- CHECK(memcmp(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40);
+ CHECK(secp256k1_memcmp_var(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40);
memcpy(&pubkey2, &pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1);
- CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
/* Multiply tweak of zero zeroizes the output. */
CHECK(secp256k1_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0);
- CHECK(memcmp(zeros, ctmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0);
CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp2) == 0);
- CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0);
memcpy(&pubkey, &pubkey2, sizeof(pubkey));
/* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing
seckey, the seckey is zeroized. */
@@ -4072,29 +4156,29 @@ void run_eckey_edge_case_test(void) {
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp2) == 1);
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0);
CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 0);
- CHECK(memcmp(zeros, ctmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0);
memcpy(ctmp, orderc, 32);
CHECK(secp256k1_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0);
- CHECK(memcmp(zeros, ctmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0);
/* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing
tweak, the seckey is zeroized. */
memcpy(ctmp, orderc, 32);
ctmp[31] = 0x40;
CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp, orderc) == 0);
- CHECK(memcmp(zeros, ctmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0);
memcpy(ctmp, orderc, 32);
ctmp[31] = 0x40;
CHECK(secp256k1_ec_seckey_tweak_mul(ctx, ctmp, orderc) == 0);
- CHECK(memcmp(zeros, ctmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0);
memcpy(ctmp, orderc, 32);
ctmp[31] = 0x40;
/* If pubkey_tweak_add or pubkey_tweak_mul are called with an overflowing
tweak, the pubkey is zeroized. */
CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, orderc) == 0);
- CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0);
memcpy(&pubkey, &pubkey2, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, orderc) == 0);
- CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0);
memcpy(&pubkey, &pubkey2, sizeof(pubkey));
/* If the resulting key in secp256k1_ec_seckey_tweak_add and
* secp256k1_ec_pubkey_tweak_add is 0 the functions fail and in the latter
@@ -4104,25 +4188,25 @@ void run_eckey_edge_case_test(void) {
memset(ctmp2, 0, 32);
ctmp2[31] = 1;
CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 0);
- CHECK(memcmp(zeros, ctmp2, 32) == 0);
+ CHECK(secp256k1_memcmp_var(zeros, ctmp2, 32) == 0);
ctmp2[31] = 1;
CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0);
- CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0);
memcpy(&pubkey, &pubkey2, sizeof(pubkey));
/* Tweak computation wraps and results in a key of 1. */
ctmp2[31] = 2;
CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 1);
- CHECK(memcmp(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1);
+ CHECK(secp256k1_memcmp_var(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1);
ctmp2[31] = 2;
CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1);
ctmp2[31] = 1;
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, ctmp2) == 1);
- CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
/* Tweak mul * 2 = 1+1. */
CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1);
ctmp2[31] = 2;
CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 1);
- CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
/* Test argument errors. */
ecount = 0;
secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount);
@@ -4131,12 +4215,12 @@ void run_eckey_edge_case_test(void) {
memset(&pubkey, 0, 32);
CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0);
CHECK(ecount == 1);
- CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0);
memcpy(&pubkey, &pubkey2, sizeof(pubkey));
memset(&pubkey2, 0, 32);
CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 0);
CHECK(ecount == 2);
- CHECK(memcmp(&pubkey2, zeros, sizeof(pubkey2)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey2, zeros, sizeof(pubkey2)) == 0);
/* Plain argument errors. */
ecount = 0;
CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1);
@@ -4176,7 +4260,7 @@ void run_eckey_edge_case_test(void) {
memset(&pubkey, 1, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, NULL) == 0);
CHECK(ecount == 2);
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
/* secp256k1_ec_pubkey_combine tests. */
ecount = 0;
pubkeys[0] = &pubkey_one;
@@ -4187,28 +4271,28 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 0) == 0);
VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_ec_pubkey_combine(ctx, NULL, pubkeys, 1) == 0);
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
CHECK(ecount == 2);
memset(&pubkey, 255, sizeof(secp256k1_pubkey));
VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, NULL, 1) == 0);
VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
CHECK(ecount == 3);
pubkeys[0] = &pubkey_negone;
memset(&pubkey, 255, sizeof(secp256k1_pubkey));
VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 1) == 1);
VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
CHECK(ecount == 3);
len = 33;
CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1);
- CHECK(memcmp(ctmp, ctmp2, 33) == 0);
+ CHECK(secp256k1_memcmp_var(ctmp, ctmp2, 33) == 0);
/* Result is infinity. */
pubkeys[0] = &pubkey_one;
pubkeys[1] = &pubkey_negone;
@@ -4216,7 +4300,7 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 0);
VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
CHECK(ecount == 3);
/* Passes through infinity but comes out one. */
pubkeys[2] = &pubkey_one;
@@ -4224,19 +4308,19 @@ void run_eckey_edge_case_test(void) {
VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 3) == 1);
VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
CHECK(ecount == 3);
len = 33;
CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1);
- CHECK(memcmp(ctmp, ctmp2, 33) == 0);
+ CHECK(secp256k1_memcmp_var(ctmp, ctmp2, 33) == 0);
/* Adds to two. */
pubkeys[1] = &pubkey_one;
memset(&pubkey, 255, sizeof(secp256k1_pubkey));
VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 1);
VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
- CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
CHECK(ecount == 3);
secp256k1_context_set_illegal_callback(ctx, NULL, NULL);
}
@@ -4250,21 +4334,21 @@ void run_eckey_negate_test(void) {
/* Verify negation changes the key and changes it back */
CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 1);
- CHECK(memcmp(seckey, seckey_tmp, 32) != 0);
+ CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) != 0);
CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 1);
- CHECK(memcmp(seckey, seckey_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0);
/* Check that privkey alias gives same result */
CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 1);
CHECK(secp256k1_ec_privkey_negate(ctx, seckey_tmp) == 1);
- CHECK(memcmp(seckey, seckey_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0);
/* Negating all 0s fails */
memset(seckey, 0, 32);
memset(seckey_tmp, 0, 32);
CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 0);
/* Check that seckey is not modified */
- CHECK(memcmp(seckey, seckey_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0);
/* Negating an overflowing seckey fails and the seckey is zeroed. In this
* test, the seckey has 16 random bytes to ensure that ec_seckey_negate
@@ -4273,7 +4357,7 @@ void run_eckey_negate_test(void) {
memset(seckey, 0xFF, 16);
memset(seckey_tmp, 0, 32);
CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 0);
- CHECK(memcmp(seckey, seckey_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0);
}
void random_sign(secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *key, const secp256k1_scalar *msg, int *recid) {
@@ -4295,7 +4379,7 @@ void test_ecdsa_sign_verify(void) {
random_scalar_order_test(&key);
secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key);
secp256k1_ge_set_gej(&pub, &pubj);
- getrec = secp256k1_rand_bits(1);
+ getrec = secp256k1_testrand_bits(1);
random_sign(&sigr, &sigs, &key, &msg, getrec?&recid:NULL);
if (getrec) {
CHECK(recid >= 0 && recid < 4);
@@ -4362,7 +4446,7 @@ static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char
int is_empty_signature(const secp256k1_ecdsa_signature *sig) {
static const unsigned char res[sizeof(secp256k1_ecdsa_signature)] = {0};
- return memcmp(sig, res, sizeof(secp256k1_ecdsa_signature)) == 0;
+ return secp256k1_memcmp_var(sig, res, sizeof(secp256k1_ecdsa_signature)) == 0;
}
void test_ecdsa_end_to_end(void) {
@@ -4395,31 +4479,31 @@ void test_ecdsa_end_to_end(void) {
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
/* Verify exporting and importing public key. */
- CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, secp256k1_rand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED));
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, secp256k1_testrand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED));
memset(&pubkey, 0, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1);
/* Verify negation changes the key and changes it back */
memcpy(&pubkey_tmp, &pubkey, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1);
- CHECK(memcmp(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0);
+ CHECK(secp256k1_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0);
CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1);
- CHECK(memcmp(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0);
/* Verify private key import and export. */
- CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, secp256k1_rand_bits(1) == 1));
+ CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, secp256k1_testrand_bits(1) == 1));
CHECK(ec_privkey_import_der(ctx, privkey2, seckey, seckeylen) == 1);
- CHECK(memcmp(privkey, privkey2, 32) == 0);
+ CHECK(secp256k1_memcmp_var(privkey, privkey2, 32) == 0);
/* Optionally tweak the keys using addition. */
- if (secp256k1_rand_int(3) == 0) {
+ if (secp256k1_testrand_int(3) == 0) {
int ret1;
int ret2;
int ret3;
unsigned char rnd[32];
unsigned char privkey_tmp[32];
secp256k1_pubkey pubkey2;
- secp256k1_rand256_test(rnd);
+ secp256k1_testrand256_test(rnd);
memcpy(privkey_tmp, privkey, 32);
ret1 = secp256k1_ec_seckey_tweak_add(ctx, privkey, rnd);
ret2 = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, rnd);
@@ -4430,20 +4514,20 @@ void test_ecdsa_end_to_end(void) {
if (ret1 == 0) {
return;
}
- CHECK(memcmp(privkey, privkey_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(privkey, privkey_tmp, 32) == 0);
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1);
- CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
}
/* Optionally tweak the keys using multiplication. */
- if (secp256k1_rand_int(3) == 0) {
+ if (secp256k1_testrand_int(3) == 0) {
int ret1;
int ret2;
int ret3;
unsigned char rnd[32];
unsigned char privkey_tmp[32];
secp256k1_pubkey pubkey2;
- secp256k1_rand256_test(rnd);
+ secp256k1_testrand256_test(rnd);
memcpy(privkey_tmp, privkey, 32);
ret1 = secp256k1_ec_seckey_tweak_mul(ctx, privkey, rnd);
ret2 = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, rnd);
@@ -4454,9 +4538,9 @@ void test_ecdsa_end_to_end(void) {
if (ret1 == 0) {
return;
}
- CHECK(memcmp(privkey, privkey_tmp, 32) == 0);
+ CHECK(secp256k1_memcmp_var(privkey, privkey_tmp, 32) == 0);
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1);
- CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
}
/* Sign. */
@@ -4468,13 +4552,13 @@ void test_ecdsa_end_to_end(void) {
extra[31] = 0;
extra[0] = 1;
CHECK(secp256k1_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1);
- CHECK(memcmp(&signature[0], &signature[4], sizeof(signature[0])) == 0);
- CHECK(memcmp(&signature[0], &signature[1], sizeof(signature[0])) != 0);
- CHECK(memcmp(&signature[0], &signature[2], sizeof(signature[0])) != 0);
- CHECK(memcmp(&signature[0], &signature[3], sizeof(signature[0])) != 0);
- CHECK(memcmp(&signature[1], &signature[2], sizeof(signature[0])) != 0);
- CHECK(memcmp(&signature[1], &signature[3], sizeof(signature[0])) != 0);
- CHECK(memcmp(&signature[2], &signature[3], sizeof(signature[0])) != 0);
+ CHECK(secp256k1_memcmp_var(&signature[0], &signature[4], sizeof(signature[0])) == 0);
+ CHECK(secp256k1_memcmp_var(&signature[0], &signature[1], sizeof(signature[0])) != 0);
+ CHECK(secp256k1_memcmp_var(&signature[0], &signature[2], sizeof(signature[0])) != 0);
+ CHECK(secp256k1_memcmp_var(&signature[0], &signature[3], sizeof(signature[0])) != 0);
+ CHECK(secp256k1_memcmp_var(&signature[1], &signature[2], sizeof(signature[0])) != 0);
+ CHECK(secp256k1_memcmp_var(&signature[1], &signature[3], sizeof(signature[0])) != 0);
+ CHECK(secp256k1_memcmp_var(&signature[2], &signature[3], sizeof(signature[0])) != 0);
/* Verify. */
CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1);
@@ -4495,7 +4579,7 @@ void test_ecdsa_end_to_end(void) {
secp256k1_ecdsa_signature_save(&signature[5], &r, &s);
CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5]));
CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1);
- CHECK(memcmp(&signature[5], &signature[0], 64) == 0);
+ CHECK(secp256k1_memcmp_var(&signature[5], &signature[0], 64) == 0);
/* Serialize/parse DER and verify again */
CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1);
@@ -4505,7 +4589,7 @@ void test_ecdsa_end_to_end(void) {
/* Serialize/destroy/parse DER and verify again. */
siglen = 74;
CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1);
- sig[secp256k1_rand_int(siglen)] += 1 + secp256k1_rand_int(255);
+ sig[secp256k1_testrand_int(siglen)] += 1 + secp256k1_testrand_int(255);
CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 ||
secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0);
}
@@ -4515,23 +4599,23 @@ void test_random_pubkeys(void) {
secp256k1_ge elem2;
unsigned char in[65];
/* Generate some randomly sized pubkeys. */
- size_t len = secp256k1_rand_bits(2) == 0 ? 65 : 33;
- if (secp256k1_rand_bits(2) == 0) {
- len = secp256k1_rand_bits(6);
+ size_t len = secp256k1_testrand_bits(2) == 0 ? 65 : 33;
+ if (secp256k1_testrand_bits(2) == 0) {
+ len = secp256k1_testrand_bits(6);
}
if (len == 65) {
- in[0] = secp256k1_rand_bits(1) ? 4 : (secp256k1_rand_bits(1) ? 6 : 7);
+ in[0] = secp256k1_testrand_bits(1) ? 4 : (secp256k1_testrand_bits(1) ? 6 : 7);
} else {
- in[0] = secp256k1_rand_bits(1) ? 2 : 3;
+ in[0] = secp256k1_testrand_bits(1) ? 2 : 3;
}
- if (secp256k1_rand_bits(3) == 0) {
- in[0] = secp256k1_rand_bits(8);
+ if (secp256k1_testrand_bits(3) == 0) {
+ in[0] = secp256k1_testrand_bits(8);
}
if (len > 1) {
- secp256k1_rand256(&in[1]);
+ secp256k1_testrand256(&in[1]);
}
if (len > 33) {
- secp256k1_rand256(&in[33]);
+ secp256k1_testrand256(&in[33]);
}
if (secp256k1_eckey_pubkey_parse(&elem, in, len)) {
unsigned char out[65];
@@ -4542,7 +4626,7 @@ void test_random_pubkeys(void) {
/* If the pubkey can be parsed, it should round-trip... */
CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, len == 33));
CHECK(size == len);
- CHECK(memcmp(&in[1], &out[1], len-1) == 0);
+ CHECK(secp256k1_memcmp_var(&in[1], &out[1], len-1) == 0);
/* ... except for the type of hybrid inputs. */
if ((in[0] != 6) && (in[0] != 7)) {
CHECK(in[0] == out[0]);
@@ -4553,7 +4637,7 @@ void test_random_pubkeys(void) {
CHECK(secp256k1_eckey_pubkey_parse(&elem2, in, size));
ge_equals_ge(&elem,&elem2);
/* Check that the X9.62 hybrid type is checked. */
- in[0] = secp256k1_rand_bits(1) ? 6 : 7;
+ in[0] = secp256k1_testrand_bits(1) ? 6 : 7;
res = secp256k1_eckey_pubkey_parse(&elem2, in, size);
if (firstb == 2 || firstb == 3) {
if (in[0] == firstb + 4) {
@@ -4565,7 +4649,7 @@ void test_random_pubkeys(void) {
if (res) {
ge_equals_ge(&elem,&elem2);
CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, 0));
- CHECK(memcmp(&in[1], &out[1], 64) == 0);
+ CHECK(secp256k1_memcmp_var(&in[1], &out[1], 64) == 0);
}
}
}
@@ -4621,21 +4705,21 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_
parsed_der = secp256k1_ecdsa_signature_parse_der(ctx, &sig_der, sig, siglen);
if (parsed_der) {
ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der, &sig_der)) << 0;
- valid_der = (memcmp(compact_der, zeroes, 32) != 0) && (memcmp(compact_der + 32, zeroes, 32) != 0);
+ valid_der = (secp256k1_memcmp_var(compact_der, zeroes, 32) != 0) && (secp256k1_memcmp_var(compact_der + 32, zeroes, 32) != 0);
}
if (valid_der) {
ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der, &len_der, &sig_der)) << 1;
- roundtrips_der = (len_der == siglen) && memcmp(roundtrip_der, sig, siglen) == 0;
+ roundtrips_der = (len_der == siglen) && secp256k1_memcmp_var(roundtrip_der, sig, siglen) == 0;
}
parsed_der_lax = ecdsa_signature_parse_der_lax(ctx, &sig_der_lax, sig, siglen);
if (parsed_der_lax) {
ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der_lax, &sig_der_lax)) << 10;
- valid_der_lax = (memcmp(compact_der_lax, zeroes, 32) != 0) && (memcmp(compact_der_lax + 32, zeroes, 32) != 0);
+ valid_der_lax = (secp256k1_memcmp_var(compact_der_lax, zeroes, 32) != 0) && (secp256k1_memcmp_var(compact_der_lax + 32, zeroes, 32) != 0);
}
if (valid_der_lax) {
ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) << 11;
- roundtrips_der_lax = (len_der_lax == siglen) && memcmp(roundtrip_der_lax, sig, siglen) == 0;
+ roundtrips_der_lax = (len_der_lax == siglen) && secp256k1_memcmp_var(roundtrip_der_lax, sig, siglen) == 0;
}
if (certainly_der) {
@@ -4651,7 +4735,7 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_
if (valid_der) {
ret |= (!roundtrips_der_lax) << 12;
ret |= (len_der != len_der_lax) << 13;
- ret |= ((len_der != len_der_lax) || (memcmp(roundtrip_der_lax, roundtrip_der, len_der) != 0)) << 14;
+ ret |= ((len_der != len_der_lax) || (secp256k1_memcmp_var(roundtrip_der_lax, roundtrip_der, len_der) != 0)) << 14;
}
ret |= (roundtrips_der != roundtrips_der_lax) << 15;
if (parsed_der) {
@@ -4668,19 +4752,19 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_
if (valid_openssl) {
unsigned char tmp[32] = {0};
BN_bn2bin(r, tmp + 32 - BN_num_bytes(r));
- valid_openssl = memcmp(tmp, max_scalar, 32) < 0;
+ valid_openssl = secp256k1_memcmp_var(tmp, max_scalar, 32) < 0;
}
if (valid_openssl) {
unsigned char tmp[32] = {0};
BN_bn2bin(s, tmp + 32 - BN_num_bytes(s));
- valid_openssl = memcmp(tmp, max_scalar, 32) < 0;
+ valid_openssl = secp256k1_memcmp_var(tmp, max_scalar, 32) < 0;
}
}
len_openssl = i2d_ECDSA_SIG(sig_openssl, NULL);
if (len_openssl <= 2048) {
unsigned char *ptr = roundtrip_openssl;
CHECK(i2d_ECDSA_SIG(sig_openssl, &ptr) == len_openssl);
- roundtrips_openssl = valid_openssl && ((size_t)len_openssl == siglen) && (memcmp(roundtrip_openssl, sig, siglen) == 0);
+ roundtrips_openssl = valid_openssl && ((size_t)len_openssl == siglen) && (secp256k1_memcmp_var(roundtrip_openssl, sig, siglen) == 0);
} else {
len_openssl = 0;
}
@@ -4692,7 +4776,7 @@ int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_
ret |= (roundtrips_der != roundtrips_openssl) << 7;
if (roundtrips_openssl) {
ret |= (len_der != (size_t)len_openssl) << 8;
- ret |= ((len_der != (size_t)len_openssl) || (memcmp(roundtrip_der, roundtrip_openssl, len_der) != 0)) << 9;
+ ret |= ((len_der != (size_t)len_openssl) || (secp256k1_memcmp_var(roundtrip_der, roundtrip_openssl, len_der) != 0)) << 9;
}
#endif
return ret;
@@ -4712,27 +4796,27 @@ static void assign_big_endian(unsigned char *ptr, size_t ptrlen, uint32_t val) {
static void damage_array(unsigned char *sig, size_t *len) {
int pos;
- int action = secp256k1_rand_bits(3);
+ int action = secp256k1_testrand_bits(3);
if (action < 1 && *len > 3) {
/* Delete a byte. */
- pos = secp256k1_rand_int(*len);
+ pos = secp256k1_testrand_int(*len);
memmove(sig + pos, sig + pos + 1, *len - pos - 1);
(*len)--;
return;
} else if (action < 2 && *len < 2048) {
/* Insert a byte. */
- pos = secp256k1_rand_int(1 + *len);
+ pos = secp256k1_testrand_int(1 + *len);
memmove(sig + pos + 1, sig + pos, *len - pos);
- sig[pos] = secp256k1_rand_bits(8);
+ sig[pos] = secp256k1_testrand_bits(8);
(*len)++;
return;
} else if (action < 4) {
/* Modify a byte. */
- sig[secp256k1_rand_int(*len)] += 1 + secp256k1_rand_int(255);
+ sig[secp256k1_testrand_int(*len)] += 1 + secp256k1_testrand_int(255);
return;
} else { /* action < 8 */
/* Modify a bit. */
- sig[secp256k1_rand_int(*len)] ^= 1 << secp256k1_rand_bits(3);
+ sig[secp256k1_testrand_int(*len)] ^= 1 << secp256k1_testrand_bits(3);
return;
}
}
@@ -4745,23 +4829,23 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
int n;
*len = 0;
- der = secp256k1_rand_bits(2) == 0;
+ der = secp256k1_testrand_bits(2) == 0;
*certainly_der = der;
*certainly_not_der = 0;
- indet = der ? 0 : secp256k1_rand_int(10) == 0;
+ indet = der ? 0 : secp256k1_testrand_int(10) == 0;
for (n = 0; n < 2; n++) {
/* We generate two classes of numbers: nlow==1 "low" ones (up to 32 bytes), nlow==0 "high" ones (32 bytes with 129 top bits set, or larger than 32 bytes) */
- nlow[n] = der ? 1 : (secp256k1_rand_bits(3) != 0);
+ nlow[n] = der ? 1 : (secp256k1_testrand_bits(3) != 0);
/* The length of the number in bytes (the first byte of which will always be nonzero) */
- nlen[n] = nlow[n] ? secp256k1_rand_int(33) : 32 + secp256k1_rand_int(200) * secp256k1_rand_int(8) / 8;
+ nlen[n] = nlow[n] ? secp256k1_testrand_int(33) : 32 + secp256k1_testrand_int(200) * secp256k1_testrand_int(8) / 8;
CHECK(nlen[n] <= 232);
/* The top bit of the number. */
- nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : secp256k1_rand_bits(1));
+ nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : secp256k1_testrand_bits(1));
/* The top byte of the number (after the potential hardcoded 16 0xFF characters for "high" 32-byte numbers) */
- nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + secp256k1_rand_bits(7) : 1 + secp256k1_rand_int(127));
+ nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + secp256k1_testrand_bits(7) : 1 + secp256k1_testrand_int(127));
/* The number of zero bytes in front of the number (which is 0 or 1 in case of DER, otherwise we extend up to 300 bytes) */
- nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? secp256k1_rand_int(3) : secp256k1_rand_int(300 - nlen[n]) * secp256k1_rand_int(8) / 8);
+ nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? secp256k1_testrand_int(3) : secp256k1_testrand_int(300 - nlen[n]) * secp256k1_testrand_int(8) / 8);
if (nzlen[n] > ((nlen[n] == 0 || nhbit[n]) ? 1 : 0)) {
*certainly_not_der = 1;
}
@@ -4770,7 +4854,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
nlenlen[n] = nlen[n] + nzlen[n] < 128 ? 0 : (nlen[n] + nzlen[n] < 256 ? 1 : 2);
if (!der) {
/* nlenlen[n] max 127 bytes */
- int add = secp256k1_rand_int(127 - nlenlen[n]) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256;
+ int add = secp256k1_testrand_int(127 - nlenlen[n]) * secp256k1_testrand_int(16) * secp256k1_testrand_int(16) / 256;
nlenlen[n] += add;
if (add != 0) {
*certainly_not_der = 1;
@@ -4784,7 +4868,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
CHECK(tlen <= 856);
/* The length of the garbage inside the tuple. */
- elen = (der || indet) ? 0 : secp256k1_rand_int(980 - tlen) * secp256k1_rand_int(8) / 8;
+ elen = (der || indet) ? 0 : secp256k1_testrand_int(980 - tlen) * secp256k1_testrand_int(8) / 8;
if (elen != 0) {
*certainly_not_der = 1;
}
@@ -4792,7 +4876,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
CHECK(tlen <= 980);
/* The length of the garbage after the end of the tuple. */
- glen = der ? 0 : secp256k1_rand_int(990 - tlen) * secp256k1_rand_int(8) / 8;
+ glen = der ? 0 : secp256k1_testrand_int(990 - tlen) * secp256k1_testrand_int(8) / 8;
if (glen != 0) {
*certainly_not_der = 1;
}
@@ -4807,7 +4891,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
} else {
int tlenlen = tlen < 128 ? 0 : (tlen < 256 ? 1 : 2);
if (!der) {
- int add = secp256k1_rand_int(127 - tlenlen) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256;
+ int add = secp256k1_testrand_int(127 - tlenlen) * secp256k1_testrand_int(16) * secp256k1_testrand_int(16) / 256;
tlenlen += add;
if (add != 0) {
*certainly_not_der = 1;
@@ -4858,13 +4942,13 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
nlen[n]--;
}
/* Generate remaining random bytes of number */
- secp256k1_rand_bytes_test(sig + *len, nlen[n]);
+ secp256k1_testrand_bytes_test(sig + *len, nlen[n]);
*len += nlen[n];
nlen[n] = 0;
}
/* Generate random garbage inside tuple. */
- secp256k1_rand_bytes_test(sig + *len, elen);
+ secp256k1_testrand_bytes_test(sig + *len, elen);
*len += elen;
/* Generate end-of-contents bytes. */
@@ -4876,7 +4960,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
CHECK(tlen + glen <= 1121);
/* Generate random garbage outside tuple. */
- secp256k1_rand_bytes_test(sig + *len, glen);
+ secp256k1_testrand_bytes_test(sig + *len, glen);
*len += glen;
tlen += glen;
CHECK(tlen <= 1121);
@@ -5208,11 +5292,11 @@ void test_ecdsa_edge_cases(void) {
CHECK(!is_empty_signature(&sig));
CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1);
CHECK(!is_empty_signature(&sig2));
- CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0);
+ CHECK(secp256k1_memcmp_var(&sig, &sig2, sizeof(sig)) == 0);
/* The default nonce function is deterministic. */
CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1);
CHECK(!is_empty_signature(&sig2));
- CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0);
+ CHECK(secp256k1_memcmp_var(&sig, &sig2, sizeof(sig)) == 0);
/* The default nonce function changes output with different messages. */
for(i = 0; i < 256; i++) {
int j;
@@ -5259,12 +5343,12 @@ void test_ecdsa_edge_cases(void) {
VG_CHECK(nonce3,32);
CHECK(nonce_function_rfc6979(nonce4, zeros, zeros, zeros, (void *)zeros, 0) == 1);
VG_CHECK(nonce4,32);
- CHECK(memcmp(nonce, nonce2, 32) != 0);
- CHECK(memcmp(nonce, nonce3, 32) != 0);
- CHECK(memcmp(nonce, nonce4, 32) != 0);
- CHECK(memcmp(nonce2, nonce3, 32) != 0);
- CHECK(memcmp(nonce2, nonce4, 32) != 0);
- CHECK(memcmp(nonce3, nonce4, 32) != 0);
+ CHECK(secp256k1_memcmp_var(nonce, nonce2, 32) != 0);
+ CHECK(secp256k1_memcmp_var(nonce, nonce3, 32) != 0);
+ CHECK(secp256k1_memcmp_var(nonce, nonce4, 32) != 0);
+ CHECK(secp256k1_memcmp_var(nonce2, nonce3, 32) != 0);
+ CHECK(secp256k1_memcmp_var(nonce2, nonce4, 32) != 0);
+ CHECK(secp256k1_memcmp_var(nonce3, nonce4, 32) != 0);
}
@@ -5293,7 +5377,7 @@ EC_KEY *get_openssl_key(const unsigned char *key32) {
unsigned char privkey[300];
size_t privkeylen;
const unsigned char* pbegin = privkey;
- int compr = secp256k1_rand_bits(1);
+ int compr = secp256k1_testrand_bits(1);
EC_KEY *ec_key = EC_KEY_new_by_curve_name(NID_secp256k1);
CHECK(ec_privkey_export_der(ctx, privkey, &privkeylen, key32, compr));
CHECK(d2i_ECPrivateKey(&ec_key, &pbegin, privkeylen));
@@ -5314,7 +5398,7 @@ void test_ecdsa_openssl(void) {
unsigned char message[32];
unsigned char signature[80];
unsigned char key32[32];
- secp256k1_rand256_test(message);
+ secp256k1_testrand256_test(message);
secp256k1_scalar_set_b32(&msg, message, NULL);
random_scalar_order_test(&key);
secp256k1_scalar_get_b32(key32, &key);
@@ -5367,12 +5451,12 @@ void run_memczero_test(void) {
/* memczero(..., ..., 0) is a noop. */
memcpy(buf2, buf1, sizeof(buf1));
memczero(buf1, sizeof(buf1), 0);
- CHECK(memcmp(buf1, buf2, sizeof(buf1)) == 0);
+ CHECK(secp256k1_memcmp_var(buf1, buf2, sizeof(buf1)) == 0);
/* memczero(..., ..., 1) zeros the buffer. */
memset(buf2, 0, sizeof(buf2));
memczero(buf1, sizeof(buf1) , 1);
- CHECK(memcmp(buf1, buf2, sizeof(buf1)) == 0);
+ CHECK(secp256k1_memcmp_var(buf1, buf2, sizeof(buf1)) == 0);
}
void int_cmov_test(void) {
@@ -5411,23 +5495,23 @@ void fe_cmov_test(void) {
secp256k1_fe a = zero;
secp256k1_fe_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
r = zero; a = max;
secp256k1_fe_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
a = zero;
secp256k1_fe_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &zero, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0);
a = one;
secp256k1_fe_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
r = one; a = zero;
secp256k1_fe_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
}
void fe_storage_cmov_test(void) {
@@ -5441,23 +5525,23 @@ void fe_storage_cmov_test(void) {
secp256k1_fe_storage a = zero;
secp256k1_fe_storage_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
r = zero; a = max;
secp256k1_fe_storage_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
a = zero;
secp256k1_fe_storage_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &zero, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0);
a = one;
secp256k1_fe_storage_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
r = one; a = zero;
secp256k1_fe_storage_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
}
void scalar_cmov_test(void) {
@@ -5471,23 +5555,23 @@ void scalar_cmov_test(void) {
secp256k1_scalar a = zero;
secp256k1_scalar_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
r = zero; a = max;
secp256k1_scalar_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
a = zero;
secp256k1_scalar_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &zero, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0);
a = one;
secp256k1_scalar_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
r = one; a = zero;
secp256k1_scalar_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
}
void ge_storage_cmov_test(void) {
@@ -5503,23 +5587,23 @@ void ge_storage_cmov_test(void) {
secp256k1_ge_storage a = zero;
secp256k1_ge_storage_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
r = zero; a = max;
secp256k1_ge_storage_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &max, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0);
a = zero;
secp256k1_ge_storage_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &zero, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0);
a = one;
secp256k1_ge_storage_cmov(&r, &a, 1);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
r = one; a = zero;
secp256k1_ge_storage_cmov(&r, &a, 0);
- CHECK(memcmp(&r, &one, sizeof(r)) == 0);
+ CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0);
}
void run_cmov_tests(void) {
@@ -5531,9 +5615,6 @@ void run_cmov_tests(void) {
}
int main(int argc, char **argv) {
- unsigned char seed16[16] = {0};
- unsigned char run32[32] = {0};
-
/* Disable buffering for stdout to improve reliability of getting
* diagnostic information. Happens right at the start of main because
* setbuf must be used before any other operation on the stream. */
@@ -5546,52 +5627,20 @@ int main(int argc, char **argv) {
if (argc > 1) {
count = strtol(argv[1], NULL, 0);
}
+ printf("test count = %i\n", count);
/* find random seed */
- if (argc > 2) {
- int pos = 0;
- const char* ch = argv[2];
- while (pos < 16 && ch[0] != 0 && ch[1] != 0) {
- unsigned short sh;
- if ((sscanf(ch, "%2hx", &sh)) == 1) {
- seed16[pos] = sh;
- } else {
- break;
- }
- ch += 2;
- pos++;
- }
- } else {
- FILE *frand = fopen("/dev/urandom", "r");
- if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) {
- uint64_t t = time(NULL) * (uint64_t)1337;
- fprintf(stderr, "WARNING: could not read 16 bytes from /dev/urandom; falling back to insecure PRNG\n");
- seed16[0] ^= t;
- seed16[1] ^= t >> 8;
- seed16[2] ^= t >> 16;
- seed16[3] ^= t >> 24;
- seed16[4] ^= t >> 32;
- seed16[5] ^= t >> 40;
- seed16[6] ^= t >> 48;
- seed16[7] ^= t >> 56;
- }
- if (frand) {
- fclose(frand);
- }
- }
- secp256k1_rand_seed(seed16);
-
- printf("test count = %i\n", count);
- printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]);
+ secp256k1_testrand_init(argc > 2 ? argv[2] : NULL);
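The command line itself is unchanged: argv[1] is the iteration count and argv[2] an optional hex seed, so a reproducible run can presumably still be started as, say,

    ./tests 64 0123456789abcdef0123456789abcdef

with the 16-byte seed parsing now living inside secp256k1_testrand_init rather than in main.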
/* initialize */
run_context_tests(0);
run_context_tests(1);
run_scratch_tests();
ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
- if (secp256k1_rand_bits(1)) {
- secp256k1_rand256(run32);
- CHECK(secp256k1_context_randomize(ctx, secp256k1_rand_bits(1) ? run32 : NULL));
+ if (secp256k1_testrand_bits(1)) {
+ unsigned char rand32[32];
+ secp256k1_testrand256(rand32);
+ CHECK(secp256k1_context_randomize(ctx, secp256k1_testrand_bits(1) ? rand32 : NULL));
}
run_rand_bits();
@@ -5625,6 +5674,7 @@ int main(int argc, char **argv) {
/* ecmult tests */
run_wnaf();
run_point_times_order();
+ run_ecmult_near_split_bound();
run_ecmult_chain();
run_ecmult_constants();
run_ecmult_gen_blind();
@@ -5633,9 +5683,7 @@ int main(int argc, char **argv) {
run_ec_combine();
/* endomorphism tests */
-#ifdef USE_ENDOMORPHISM
run_endomorphism_tests();
-#endif
/* EC point parser test */
run_ec_pubkey_parse_test();
@@ -5679,8 +5727,7 @@ int main(int argc, char **argv) {
run_cmov_tests();
- secp256k1_rand256(run32);
- printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]);
+ secp256k1_testrand_finish();
/* shutdown */
secp256k1_context_destroy(ctx);
diff --git a/src/secp256k1/src/tests_exhaustive.c b/src/secp256k1/src/tests_exhaustive.c
index 681ed80bd0..f4d5b8e176 100644
--- a/src/secp256k1/src/tests_exhaustive.c
+++ b/src/secp256k1/src/tests_exhaustive.c
@@ -18,7 +18,6 @@
#ifndef EXHAUSTIVE_TEST_ORDER
/* see group_impl.h for allowable values */
#define EXHAUSTIVE_TEST_ORDER 13
-#define EXHAUSTIVE_TEST_LAMBDA 9 /* cube root of 1 mod 13 */
#endif
#include "include/secp256k1.h"
@@ -27,10 +26,7 @@
#include "secp256k1.c"
#include "testrand_impl.h"
-#ifdef ENABLE_MODULE_RECOVERY
-#include "src/modules/recovery/main_impl.h"
-#include "include/secp256k1_recovery.h"
-#endif
+static int count = 2;
/** stolen from tests.c */
void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) {
@@ -62,7 +58,7 @@ void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
void random_fe(secp256k1_fe *x) {
unsigned char bin[32];
do {
- secp256k1_rand256(bin);
+ secp256k1_testrand256(bin);
if (secp256k1_fe_set_b32(x, bin)) {
return;
}
@@ -70,6 +66,15 @@ void random_fe(secp256k1_fe *x) {
}
/** END stolen from tests.c */
+static uint32_t num_cores = 1;
+static uint32_t this_core = 0;
+
+SECP256K1_INLINE static int skip_section(uint64_t* iter) {
+ if (num_cores == 1) return 0;
+ *iter += 0xe7037ed1a0b428dbULL;
+ return ((((uint32_t)*iter ^ (*iter >> 32)) * num_cores) >> 32) != this_core;
+}
+
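The helper above splits work across cooperating test processes: each call advances the counter by a large odd constant, mixes its two 32-bit halves, and maps the result onto [0, num_cores) with a 32x32->64-bit multiply-and-shift, so every core runs a disjoint, roughly 1/num_cores share of the loop sections. A minimal sketch of a driver loop, with NUM_SECTIONS and run_one_section as invented placeholders:

    uint64_t iter = 0;
    int section;
    for (section = 0; section < NUM_SECTIONS; section++) {
        if (skip_section(&iter)) continue;  /* this section belongs to another core */
        run_one_section(section);           /* placeholder for the section's tests */
    }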
int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
const unsigned char *key32, const unsigned char *algo16,
void *data, unsigned int attempt) {
@@ -90,91 +95,93 @@ int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned cha
return 1;
}
-#ifdef USE_ENDOMORPHISM
-void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) {
+void test_exhaustive_endomorphism(const secp256k1_ge *group) {
int i;
- for (i = 0; i < order; i++) {
+ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_ge res;
secp256k1_ge_mul_lambda(&res, &group[i]);
ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res);
}
}
-#endif
-void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
+void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj) {
int i, j;
+ uint64_t iter = 0;
/* Sanity-check (and check infinity functions) */
CHECK(secp256k1_ge_is_infinity(&group[0]));
CHECK(secp256k1_gej_is_infinity(&groupj[0]));
- for (i = 1; i < order; i++) {
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
CHECK(!secp256k1_ge_is_infinity(&group[i]));
CHECK(!secp256k1_gej_is_infinity(&groupj[i]));
}
/* Check all addition formulae */
- for (j = 0; j < order; j++) {
+ for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
secp256k1_fe fe_inv;
+ if (skip_section(&iter)) continue;
secp256k1_fe_inv(&fe_inv, &groupj[j].z);
- for (i = 0; i < order; i++) {
+ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_ge zless_gej;
secp256k1_gej tmp;
/* add_var */
secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL);
- ge_equals_gej(&group[(i + j) % order], &tmp);
+ ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
/* add_ge */
if (j > 0) {
secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]);
- ge_equals_gej(&group[(i + j) % order], &tmp);
+ ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
/* add_ge_var */
secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL);
- ge_equals_gej(&group[(i + j) % order], &tmp);
+ ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
/* add_zinv_var */
zless_gej.infinity = groupj[j].infinity;
zless_gej.x = groupj[j].x;
zless_gej.y = groupj[j].y;
secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv);
- ge_equals_gej(&group[(i + j) % order], &tmp);
+ ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
}
/* Check doubling */
- for (i = 0; i < order; i++) {
+ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_gej tmp;
secp256k1_gej_double(&tmp, &groupj[i]);
- ge_equals_gej(&group[(2 * i) % order], &tmp);
+ ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp);
secp256k1_gej_double_var(&tmp, &groupj[i], NULL);
- ge_equals_gej(&group[(2 * i) % order], &tmp);
+ ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
/* Check negation */
- for (i = 1; i < order; i++) {
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_ge tmp;
secp256k1_gej tmpj;
secp256k1_ge_neg(&tmp, &group[i]);
- ge_equals_ge(&group[order - i], &tmp);
+ ge_equals_ge(&group[EXHAUSTIVE_TEST_ORDER - i], &tmp);
secp256k1_gej_neg(&tmpj, &groupj[i]);
- ge_equals_gej(&group[order - i], &tmpj);
+ ge_equals_gej(&group[EXHAUSTIVE_TEST_ORDER - i], &tmpj);
}
}
-void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
+void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj) {
int i, j, r_log;
- for (r_log = 1; r_log < order; r_log++) {
- for (j = 0; j < order; j++) {
- for (i = 0; i < order; i++) {
+ uint64_t iter = 0;
+ for (r_log = 1; r_log < EXHAUSTIVE_TEST_ORDER; r_log++) {
+ for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
+ if (skip_section(&iter)) continue;
+ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_gej tmp;
secp256k1_scalar na, ng;
secp256k1_scalar_set_int(&na, i);
secp256k1_scalar_set_int(&ng, j);
secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng);
- ge_equals_gej(&group[(i * r_log + j) % order], &tmp);
+ ge_equals_gej(&group[(i * r_log + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
if (i > 0) {
secp256k1_ecmult_const(&tmp, &group[i], &ng, 256);
- ge_equals_gej(&group[(i * j) % order], &tmp);
+ ge_equals_gej(&group[(i * j) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
}
}
@@ -193,14 +200,16 @@ static int ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t
return 1;
}
-void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group) {
int i, j, k, x, y;
+ uint64_t iter = 0;
secp256k1_scratch *scratch = secp256k1_scratch_create(&ctx->error_callback, 4096);
- for (i = 0; i < order; i++) {
- for (j = 0; j < order; j++) {
- for (k = 0; k < order; k++) {
- for (x = 0; x < order; x++) {
- for (y = 0; y < order; y++) {
+ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
+ for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
+ for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
+ for (x = 0; x < EXHAUSTIVE_TEST_ORDER; x++) {
+ if (skip_section(&iter)) continue;
+ for (y = 0; y < EXHAUSTIVE_TEST_ORDER; y++) {
secp256k1_gej tmp;
secp256k1_scalar g_sc;
ecmult_multi_data data;
@@ -212,7 +221,7 @@ void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_
data.pt[1] = group[y];
secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2);
- ge_equals_gej(&group[(i * x + j * y + k) % order], &tmp);
+ ge_equals_gej(&group[(i * x + j * y + k) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
}
}
@@ -221,22 +230,23 @@ void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}
-void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) {
+void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k, int* overflow) {
secp256k1_fe x;
unsigned char x_bin[32];
k %= EXHAUSTIVE_TEST_ORDER;
x = group[k].x;
secp256k1_fe_normalize(&x);
secp256k1_fe_get_b32(x_bin, &x);
- secp256k1_scalar_set_b32(r, x_bin, NULL);
+ secp256k1_scalar_set_b32(r, x_bin, overflow);
}
-void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group) {
int s, r, msg, key;
- for (s = 1; s < order; s++) {
- for (r = 1; r < order; r++) {
- for (msg = 1; msg < order; msg++) {
- for (key = 1; key < order; key++) {
+ uint64_t iter = 0;
+ for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) {
+ for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) {
+ for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) {
+ for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) {
secp256k1_ge nonconst_ge;
secp256k1_ecdsa_signature sig;
secp256k1_pubkey pk;
@@ -245,6 +255,8 @@ void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *gr
int k, should_verify;
unsigned char msg32[32];
+ if (skip_section(&iter)) continue;
+
secp256k1_scalar_set_int(&s_s, s);
secp256k1_scalar_set_int(&r_s, r);
secp256k1_scalar_set_int(&msg_s, msg);
@@ -254,9 +266,9 @@ void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *gr
/* Run through every k value that gives us this r and check that *one* works.
* Note there could be none, there could be multiple, ECDSA is weird. */
should_verify = 0;
- for (k = 0; k < order; k++) {
+ for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
secp256k1_scalar check_x_s;
- r_from_k(&check_x_s, group, k);
+ r_from_k(&check_x_s, group, k, NULL);
if (r_s == check_x_s) {
secp256k1_scalar_set_int(&s_times_k_s, k);
secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
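For orientation, the condition being brute-forced here is the standard ECDSA verification relation, restated over the tiny group: a signature (r, s) on message m under key x should verify exactly when some nonce k with R = k*G gives

    r == R.x mod n   and   s*k == m + r*x  (mod n),

which is why the loop marks should_verify once a k whose r_from_k output matches r also satisfies the congruence.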
@@ -281,13 +293,15 @@ void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *gr
}
}
-void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group) {
int i, j, k;
+ uint64_t iter = 0;
/* Loop */
- for (i = 1; i < order; i++) { /* message */
- for (j = 1; j < order; j++) { /* key */
- for (k = 1; k < order; k++) { /* nonce */
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { /* message */
+ for (j = 1; j < EXHAUSTIVE_TEST_ORDER; j++) { /* key */
+ if (skip_section(&iter)) continue;
+ for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */
const int starting_k = k;
secp256k1_ecdsa_signature sig;
secp256k1_scalar sk, msg, r, s, expected_r;
@@ -303,10 +317,10 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou
/* Note that we compute expected_r *after* signing -- this is important
                 * because our nonce-computing function might change k during
* signing. */
- r_from_k(&expected_r, group, k);
+ r_from_k(&expected_r, group, k, NULL);
CHECK(r == expected_r);
- CHECK((k * s) % order == (i + r * j) % order ||
- (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
+ CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
+ (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);
/* Overflow means we've tried every possible nonce */
if (k < starting_k) {
@@ -327,184 +341,114 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou
}
#ifdef ENABLE_MODULE_RECOVERY
-void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
- int i, j, k;
-
- /* Loop */
- for (i = 1; i < order; i++) { /* message */
- for (j = 1; j < order; j++) { /* key */
- for (k = 1; k < order; k++) { /* nonce */
- const int starting_k = k;
- secp256k1_fe r_dot_y_normalized;
- secp256k1_ecdsa_recoverable_signature rsig;
- secp256k1_ecdsa_signature sig;
- secp256k1_scalar sk, msg, r, s, expected_r;
- unsigned char sk32[32], msg32[32];
- int expected_recid;
- int recid;
- secp256k1_scalar_set_int(&msg, i);
- secp256k1_scalar_set_int(&sk, j);
- secp256k1_scalar_get_b32(sk32, &sk);
- secp256k1_scalar_get_b32(msg32, &msg);
-
- secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
+#include "src/modules/recovery/tests_exhaustive_impl.h"
+#endif
- /* Check directly */
- secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
- r_from_k(&expected_r, group, k);
- CHECK(r == expected_r);
- CHECK((k * s) % order == (i + r * j) % order ||
- (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
- /* In computing the recid, there is an overflow condition that is disabled in
- * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value
- * will exceed the group order, and our signing code always holds out for r
- * values that don't overflow, so with a proper overflow check the tests would
- * loop indefinitely. */
- r_dot_y_normalized = group[k].y;
- secp256k1_fe_normalize(&r_dot_y_normalized);
- /* Also the recovery id is flipped depending if we hit the low-s branch */
- if ((k * s) % order == (i + r * j) % order) {
- expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0;
- } else {
- expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1;
- }
- CHECK(recid == expected_recid);
+#ifdef ENABLE_MODULE_EXTRAKEYS
+#include "src/modules/extrakeys/tests_exhaustive_impl.h"
+#endif
- /* Convert to a standard sig then check */
- secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
- secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
- /* Note that we compute expected_r *after* signing -- this is important
- * because our nonce-computing function function might change k during
- * signing. */
- r_from_k(&expected_r, group, k);
- CHECK(r == expected_r);
- CHECK((k * s) % order == (i + r * j) % order ||
- (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
+#ifdef ENABLE_MODULE_SCHNORRSIG
+#include "src/modules/schnorrsig/tests_exhaustive_impl.h"
+#endif
- /* Overflow means we've tried every possible nonce */
- if (k < starting_k) {
- break;
- }
- }
+int main(int argc, char** argv) {
+ int i;
+ secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER];
+ secp256k1_ge group[EXHAUSTIVE_TEST_ORDER];
+ unsigned char rand32[32];
+ secp256k1_context *ctx;
+
+ /* Disable buffering for stdout to improve reliability of getting
+ * diagnostic information. Happens right at the start of main because
+ * setbuf must be used before any other operation on the stream. */
+ setbuf(stdout, NULL);
+ /* Also disable buffering for stderr because it's not guaranteed that it's
+ * unbuffered on all systems. */
+ setbuf(stderr, NULL);
+
+ printf("Exhaustive tests for order %lu\n", (unsigned long)EXHAUSTIVE_TEST_ORDER);
+
+ /* find iteration count */
+ if (argc > 1) {
+ count = strtol(argv[1], NULL, 0);
+ }
+ printf("test count = %i\n", count);
+
+ /* find random seed */
+ secp256k1_testrand_init(argc > 2 ? argv[2] : NULL);
+
+ /* set up split processing */
+ if (argc > 4) {
+ num_cores = strtol(argv[3], NULL, 0);
+ this_core = strtol(argv[4], NULL, 0);
+ if (num_cores < 1 || this_core >= num_cores) {
+ fprintf(stderr, "Usage: %s [count] [seed] [numcores] [thiscore]\n", argv[0]);
+ return 1;
}
+ printf("running tests for core %lu (out of [0..%lu])\n", (unsigned long)this_core, (unsigned long)num_cores - 1);
}
-}
-
-void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
- /* This is essentially a copy of test_exhaustive_verify, with recovery added */
- int s, r, msg, key;
- for (s = 1; s < order; s++) {
- for (r = 1; r < order; r++) {
- for (msg = 1; msg < order; msg++) {
- for (key = 1; key < order; key++) {
- secp256k1_ge nonconst_ge;
- secp256k1_ecdsa_recoverable_signature rsig;
- secp256k1_ecdsa_signature sig;
- secp256k1_pubkey pk;
- secp256k1_scalar sk_s, msg_s, r_s, s_s;
- secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
- int recid = 0;
- int k, should_verify;
- unsigned char msg32[32];
- secp256k1_scalar_set_int(&s_s, s);
- secp256k1_scalar_set_int(&r_s, r);
- secp256k1_scalar_set_int(&msg_s, msg);
- secp256k1_scalar_set_int(&sk_s, key);
- secp256k1_scalar_get_b32(msg32, &msg_s);
+ while (count--) {
+ /* Build context */
+ ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ secp256k1_testrand256(rand32);
+ CHECK(secp256k1_context_randomize(ctx, rand32));
+
+ /* Generate the entire group */
+ secp256k1_gej_set_infinity(&groupj[0]);
+ secp256k1_ge_set_gej(&group[0], &groupj[0]);
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
+ secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
+ secp256k1_ge_set_gej(&group[i], &groupj[i]);
+ if (count != 0) {
+                /* Set a different random z-value for each Jacobian point, except that
+                   z=1 is used in the last iteration. */
+ secp256k1_fe z;
+ random_fe(&z);
+ secp256k1_gej_rescale(&groupj[i], &z);
+ }
- /* Verify by hand */
- /* Run through every k value that gives us this r and check that *one* works.
- * Note there could be none, there could be multiple, ECDSA is weird. */
- should_verify = 0;
- for (k = 0; k < order; k++) {
- secp256k1_scalar check_x_s;
- r_from_k(&check_x_s, group, k);
- if (r_s == check_x_s) {
- secp256k1_scalar_set_int(&s_times_k_s, k);
- secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
- secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
- secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
- should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
- }
- }
- /* nb we have a "high s" rule */
- should_verify &= !secp256k1_scalar_is_high(&s_s);
+ /* Verify against ecmult_gen */
+ {
+ secp256k1_scalar scalar_i;
+ secp256k1_gej generatedj;
+ secp256k1_ge generated;
- /* We would like to try recovering the pubkey and checking that it matches,
- * but pubkey recovery is impossible in the exhaustive tests (the reason
- * being that there are 12 nonzero r values, 12 nonzero points, and no
- * overlap between the sets, so there are no valid signatures). */
+ secp256k1_scalar_set_int(&scalar_i, i);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
+ secp256k1_ge_set_gej(&generated, &generatedj);
- /* Verify by converting to a standard signature and calling verify */
- secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
- secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
- memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
- secp256k1_pubkey_save(&pk, &nonconst_ge);
- CHECK(should_verify ==
- secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
- }
+ CHECK(group[i].infinity == 0);
+ CHECK(generated.infinity == 0);
+ CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
+ CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
}
}
- }
-}
-#endif
-
-int main(void) {
- int i;
- secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER];
- secp256k1_ge group[EXHAUSTIVE_TEST_ORDER];
- /* Build context */
- secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ /* Run the tests */
+ test_exhaustive_endomorphism(group);
+ test_exhaustive_addition(group, groupj);
+ test_exhaustive_ecmult(ctx, group, groupj);
+ test_exhaustive_ecmult_multi(ctx, group);
+ test_exhaustive_sign(ctx, group);
+ test_exhaustive_verify(ctx, group);
- /* TODO set z = 1, then do num_tests runs with random z values */
+#ifdef ENABLE_MODULE_RECOVERY
+ test_exhaustive_recovery(ctx, group);
+#endif
+#ifdef ENABLE_MODULE_EXTRAKEYS
+ test_exhaustive_extrakeys(ctx, group);
+#endif
+#ifdef ENABLE_MODULE_SCHNORRSIG
+ test_exhaustive_schnorrsig(ctx);
+#endif
- /* Generate the entire group */
- secp256k1_gej_set_infinity(&groupj[0]);
- secp256k1_ge_set_gej(&group[0], &groupj[0]);
- for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
- /* Set a different random z-value for each Jacobian point */
- secp256k1_fe z;
- random_fe(&z);
-
- secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
- secp256k1_ge_set_gej(&group[i], &groupj[i]);
- secp256k1_gej_rescale(&groupj[i], &z);
-
- /* Verify against ecmult_gen */
- {
- secp256k1_scalar scalar_i;
- secp256k1_gej generatedj;
- secp256k1_ge generated;
-
- secp256k1_scalar_set_int(&scalar_i, i);
- secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
- secp256k1_ge_set_gej(&generated, &generatedj);
-
- CHECK(group[i].infinity == 0);
- CHECK(generated.infinity == 0);
- CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
- CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
- }
+ secp256k1_context_destroy(ctx);
}
- /* Run the tests */
-#ifdef USE_ENDOMORPHISM
- test_exhaustive_endomorphism(group, EXHAUSTIVE_TEST_ORDER);
-#endif
- test_exhaustive_addition(group, groupj, EXHAUSTIVE_TEST_ORDER);
- test_exhaustive_ecmult(ctx, group, groupj, EXHAUSTIVE_TEST_ORDER);
- test_exhaustive_ecmult_multi(ctx, group, EXHAUSTIVE_TEST_ORDER);
- test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
- test_exhaustive_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
+ secp256k1_testrand_finish();
-#ifdef ENABLE_MODULE_RECOVERY
- test_exhaustive_recovery_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
- test_exhaustive_recovery_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
-#endif
-
- secp256k1_context_destroy(ctx);
+ printf("no problems found\n");
return 0;
}
-
diff --git a/src/secp256k1/src/util.h b/src/secp256k1/src/util.h
index a5cbe03ef5..3a88a41bc6 100644
--- a/src/secp256k1/src/util.h
+++ b/src/secp256k1/src/util.h
@@ -216,6 +216,24 @@ static SECP256K1_INLINE void memczero(void *s, size_t len, int flag) {
}
}
+/** Semantics like memcmp. Variable-time.
+ *
+ * We use this to avoid possible compiler bugs with memcmp, e.g.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95189
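+ *
+ * For example, secp256k1_memcmp_var(a, b, 32) == 0 tests two 32-byte buffers
+ * for equality, with no constant-time guarantee.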
+ */
+static SECP256K1_INLINE int secp256k1_memcmp_var(const void *s1, const void *s2, size_t n) {
+ const unsigned char *p1 = s1, *p2 = s2;
+ size_t i;
+
+ for (i = 0; i < n; i++) {
+ int diff = p1[i] - p2[i];
+ if (diff != 0) {
+ return diff;
+ }
+ }
+ return 0;
+}
+
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized and non-negative.*/
static SECP256K1_INLINE void secp256k1_int_cmov(int *r, const int *a, int flag) {
unsigned int mask0, mask1, r_masked, a_masked;
diff --git a/src/secp256k1/src/valgrind_ctime_test.c b/src/secp256k1/src/valgrind_ctime_test.c
index e676a8326c..3169e3651c 100644
--- a/src/secp256k1/src/valgrind_ctime_test.c
+++ b/src/secp256k1/src/valgrind_ctime_test.c
@@ -9,19 +9,19 @@
#include "assumptions.h"
#include "util.h"
-#if ENABLE_MODULE_ECDH
+#ifdef ENABLE_MODULE_ECDH
# include "include/secp256k1_ecdh.h"
#endif
-#if ENABLE_MODULE_RECOVERY
+#ifdef ENABLE_MODULE_RECOVERY
# include "include/secp256k1_recovery.h"
#endif
-#if ENABLE_MODULE_EXTRAKEYS
+#ifdef ENABLE_MODULE_EXTRAKEYS
# include "include/secp256k1_extrakeys.h"
#endif
-#if ENABLE_MODULE_SCHNORRSIG
+#ifdef ENABLE_MODULE_SCHNORRSIG
#include "include/secp256k1_schnorrsig.h"
#endif
@@ -37,11 +37,11 @@ int main(void) {
unsigned char key[32];
unsigned char sig[74];
unsigned char spubkey[33];
-#if ENABLE_MODULE_RECOVERY
+#ifdef ENABLE_MODULE_RECOVERY
secp256k1_ecdsa_recoverable_signature recoverable_signature;
int recid;
#endif
-#if ENABLE_MODULE_EXTRAKEYS
+#ifdef ENABLE_MODULE_EXTRAKEYS
secp256k1_keypair keypair;
#endif
@@ -81,7 +81,7 @@ int main(void) {
CHECK(ret);
CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature));
-#if ENABLE_MODULE_ECDH
+#ifdef ENABLE_MODULE_ECDH
/* Test ECDH. */
VALGRIND_MAKE_MEM_UNDEFINED(key, 32);
ret = secp256k1_ecdh(ctx, msg, &pubkey, key, NULL, NULL);
@@ -89,7 +89,7 @@ int main(void) {
CHECK(ret == 1);
#endif
-#if ENABLE_MODULE_RECOVERY
+#ifdef ENABLE_MODULE_RECOVERY
/* Test signing a recoverable signature. */
VALGRIND_MAKE_MEM_UNDEFINED(key, 32);
ret = secp256k1_ecdsa_sign_recoverable(ctx, &recoverable_signature, msg, key, NULL, NULL);
@@ -129,7 +129,7 @@ int main(void) {
CHECK(ret);
/* Test keypair_create and keypair_xonly_tweak_add. */
-#if ENABLE_MODULE_EXTRAKEYS
+#ifdef ENABLE_MODULE_EXTRAKEYS
VALGRIND_MAKE_MEM_UNDEFINED(key, 32);
ret = secp256k1_keypair_create(ctx, &keypair, key);
VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret));
@@ -142,7 +142,7 @@ int main(void) {
CHECK(ret == 1);
#endif
-#if ENABLE_MODULE_SCHNORRSIG
+#ifdef ENABLE_MODULE_SCHNORRSIG
VALGRIND_MAKE_MEM_UNDEFINED(key, 32);
ret = secp256k1_keypair_create(ctx, &keypair, key);
VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret));
diff --git a/src/test/fuzz/txrequest.cpp b/src/test/fuzz/txrequest.cpp
new file mode 100644
index 0000000000..9529ad3274
--- /dev/null
+++ b/src/test/fuzz/txrequest.cpp
@@ -0,0 +1,374 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <crypto/common.h>
+#include <crypto/sha256.h>
+#include <crypto/siphash.h>
+#include <primitives/transaction.h>
+#include <test/fuzz/fuzz.h>
+#include <txrequest.h>
+
+#include <bitset>
+#include <cstdint>
+#include <queue>
+#include <vector>
+
+namespace {
+
+constexpr int MAX_TXHASHES = 16;
+constexpr int MAX_PEERS = 16;
+
+//! Randomly generated txhashes used in this test (length is MAX_TXHASHES).
+uint256 TXHASHES[MAX_TXHASHES];
+
+//! Precomputed random durations (positive and negative, each ~exponentially distributed).
+std::chrono::microseconds DELAYS[256];
+
+struct Initializer
+{
+ Initializer()
+ {
+ for (uint8_t txhash = 0; txhash < MAX_TXHASHES; txhash += 1) {
+ CSHA256().Write(&txhash, 1).Finalize(TXHASHES[txhash].begin());
+ }
+ int i = 0;
+ // DELAYS[N] for N=0..15 is just N microseconds.
+ for (; i < 16; ++i) {
+ DELAYS[i] = std::chrono::microseconds{i};
+ }
+        // DELAYS[N] for N=16..127 have random-looking but roughly exponentially increasing values up to
+ // 198.416453 seconds.
+ for (; i < 128; ++i) {
+ int diff_bits = ((i - 10) * 2) / 9;
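+            // diff is a pseudorandom diff_bits-bit increment (at least 1), so the
+            // delays grow roughly exponentially with i.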
+ uint64_t diff = 1 + (CSipHasher(0, 0).Write(i).Finalize() >> (64 - diff_bits));
+ DELAYS[i] = DELAYS[i - 1] + std::chrono::microseconds{diff};
+ }
+ // DELAYS[N] for N=128..255 are negative delays with the same magnitude as N=0..127.
+ for (; i < 256; ++i) {
+ DELAYS[i] = -DELAYS[255 - i];
+ }
+ }
+} g_initializer;
+
+/** Tester class for TxRequestTracker
+ *
+ * It includes a naive reimplementation of its behavior, for a limited set
+ * of MAX_TXHASHES distinct txhashes, and MAX_PEERS peer identifiers.
+ *
+ * All of the public member functions perform the same operation on
+ * an actual TxRequestTracker and on the state of the reimplementation.
+ * The output of GetRequestable is compared with the expected value
+ * as well.
+ *
+ * Check() calls the TxRequestTracker's sanity check, plus compares the
+ * output of the constant accessors (Size(), Count(), CountInFlight(), CountCandidates())
+ * with expected values.
+ */
+class Tester
+{
+ //! TxRequestTracker object being tested.
+ TxRequestTracker m_tracker;
+
+ //! States for txid/peer combinations in the naive data structure.
+ enum class State {
+ NOTHING, //!< Absence of this txid/peer combination
+
+ // Note that this implementation does not distinguish between DELAYED/READY/BEST variants of CANDIDATE.
+ CANDIDATE,
+ REQUESTED,
+ COMPLETED,
+ };
+
+ //! Sequence numbers, incremented whenever a new CANDIDATE is added.
+ uint64_t m_current_sequence{0};
+
+ //! List of future 'events' (all inserted reqtimes/exptimes). This is used to implement AdvanceToEvent.
+ std::priority_queue<std::chrono::microseconds, std::vector<std::chrono::microseconds>,
+ std::greater<std::chrono::microseconds>> m_events;
+
+ //! Information about a txhash/peer combination.
+ struct Announcement
+ {
+ std::chrono::microseconds m_time;
+ uint64_t m_sequence;
+ State m_state{State::NOTHING};
+ bool m_preferred;
+ bool m_is_wtxid;
+ uint64_t m_priority; //!< Precomputed priority.
+ };
+
+    //! Information about all txhash/peer combinations.
+ Announcement m_announcements[MAX_TXHASHES][MAX_PEERS];
+
+ //! The current time; can move forward and backward.
+ std::chrono::microseconds m_now{244466666};
+
+ //! Delete txhashes whose only announcements are COMPLETED.
+ void Cleanup(int txhash)
+ {
+ bool all_nothing = true;
+ for (int peer = 0; peer < MAX_PEERS; ++peer) {
+ const Announcement& ann = m_announcements[txhash][peer];
+ if (ann.m_state != State::NOTHING) {
+ if (ann.m_state != State::COMPLETED) return;
+ all_nothing = false;
+ }
+ }
+ if (all_nothing) return;
+ for (int peer = 0; peer < MAX_PEERS; ++peer) {
+ m_announcements[txhash][peer].m_state = State::NOTHING;
+ }
+ }
+
+ //! Find the current best peer to request from for a txhash (or -1 if none).
+ int GetSelected(int txhash) const
+ {
+ int ret = -1;
+ uint64_t ret_priority = 0;
+ for (int peer = 0; peer < MAX_PEERS; ++peer) {
+ const Announcement& ann = m_announcements[txhash][peer];
+ // Return -1 if there already is a (non-expired) in-flight request.
+ if (ann.m_state == State::REQUESTED) return -1;
+            // If it's a viable candidate, see if it has higher priority than the best one so far.
+ if (ann.m_state == State::CANDIDATE && ann.m_time <= m_now) {
+ if (ret == -1 || ann.m_priority > ret_priority) {
+ std::tie(ret, ret_priority) = std::tie(peer, ann.m_priority);
+ }
+ }
+ }
+ return ret;
+ }
+
+public:
+ Tester() : m_tracker(true) {}
+
+ std::chrono::microseconds Now() const { return m_now; }
+
+ void AdvanceTime(std::chrono::microseconds offset)
+ {
+ m_now += offset;
+ while (!m_events.empty() && m_events.top() <= m_now) m_events.pop();
+ }
+
+ void AdvanceToEvent()
+ {
+ while (!m_events.empty() && m_events.top() <= m_now) m_events.pop();
+ if (!m_events.empty()) {
+ m_now = m_events.top();
+ m_events.pop();
+ }
+ }
+
+ void DisconnectedPeer(int peer)
+ {
+ // Apply to naive structure: all announcements for that peer are wiped.
+ for (int txhash = 0; txhash < MAX_TXHASHES; ++txhash) {
+ if (m_announcements[txhash][peer].m_state != State::NOTHING) {
+ m_announcements[txhash][peer].m_state = State::NOTHING;
+ Cleanup(txhash);
+ }
+ }
+
+ // Call TxRequestTracker's implementation.
+ m_tracker.DisconnectedPeer(peer);
+ }
+
+ void ForgetTxHash(int txhash)
+ {
+ // Apply to naive structure: all announcements for that txhash are wiped.
+ for (int peer = 0; peer < MAX_PEERS; ++peer) {
+ m_announcements[txhash][peer].m_state = State::NOTHING;
+ }
+ Cleanup(txhash);
+
+ // Call TxRequestTracker's implementation.
+ m_tracker.ForgetTxHash(TXHASHES[txhash]);
+ }
+
+ void ReceivedInv(int peer, int txhash, bool is_wtxid, bool preferred, std::chrono::microseconds reqtime)
+ {
+        // Apply to naive structure: if no announcement for this txhash/peer combination
+        // exists yet, create a new CANDIDATE; otherwise do nothing.
+ Announcement& ann = m_announcements[txhash][peer];
+ if (ann.m_state == State::NOTHING) {
+ ann.m_preferred = preferred;
+ ann.m_state = State::CANDIDATE;
+ ann.m_time = reqtime;
+ ann.m_is_wtxid = is_wtxid;
+ ann.m_sequence = m_current_sequence++;
+ ann.m_priority = m_tracker.ComputePriority(TXHASHES[txhash], peer, ann.m_preferred);
+
+ // Add event so that AdvanceToEvent can quickly jump to the point where its reqtime passes.
+ if (reqtime > m_now) m_events.push(reqtime);
+ }
+
+ // Call TxRequestTracker's implementation.
+ m_tracker.ReceivedInv(peer, GenTxid{is_wtxid, TXHASHES[txhash]}, preferred, reqtime);
+ }
+
+ void RequestedTx(int peer, int txhash, std::chrono::microseconds exptime)
+ {
+ // Apply to naive structure: if a CANDIDATE announcement exists for peer/txhash,
+ // convert it to REQUESTED, and change any existing REQUESTED announcement for the same txhash to COMPLETED.
+ if (m_announcements[txhash][peer].m_state == State::CANDIDATE) {
+ for (int peer2 = 0; peer2 < MAX_PEERS; ++peer2) {
+ if (m_announcements[txhash][peer2].m_state == State::REQUESTED) {
+ m_announcements[txhash][peer2].m_state = State::COMPLETED;
+ }
+ }
+ m_announcements[txhash][peer].m_state = State::REQUESTED;
+ m_announcements[txhash][peer].m_time = exptime;
+ }
+
+ // Add event so that AdvanceToEvent can quickly jump to the point where its exptime passes.
+ if (exptime > m_now) m_events.push(exptime);
+
+ // Call TxRequestTracker's implementation.
+ m_tracker.RequestedTx(peer, TXHASHES[txhash], exptime);
+ }
+
+ void ReceivedResponse(int peer, int txhash)
+ {
+ // Apply to naive structure: convert anything to COMPLETED.
+ if (m_announcements[txhash][peer].m_state != State::NOTHING) {
+ m_announcements[txhash][peer].m_state = State::COMPLETED;
+ Cleanup(txhash);
+ }
+
+ // Call TxRequestTracker's implementation.
+ m_tracker.ReceivedResponse(peer, TXHASHES[txhash]);
+ }
+
+ void GetRequestable(int peer)
+ {
+ // Implement using naive structure:
+
+ //! list of (sequence number, txhash, is_wtxid) tuples.
+ std::vector<std::tuple<uint64_t, int, bool>> result;
+ std::vector<std::pair<NodeId, GenTxid>> expected_expired;
+ for (int txhash = 0; txhash < MAX_TXHASHES; ++txhash) {
+ // Mark any expired REQUESTED announcements as COMPLETED.
+ for (int peer2 = 0; peer2 < MAX_PEERS; ++peer2) {
+ Announcement& ann2 = m_announcements[txhash][peer2];
+ if (ann2.m_state == State::REQUESTED && ann2.m_time <= m_now) {
+ expected_expired.emplace_back(peer2, GenTxid{ann2.m_is_wtxid, TXHASHES[txhash]});
+ ann2.m_state = State::COMPLETED;
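+                    // At most one announcement per txhash can be in REQUESTED state,
+                    // so we can stop scanning here.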
+ break;
+ }
+ }
+ // And delete txids with only COMPLETED announcements left.
+ Cleanup(txhash);
+ // CANDIDATEs for which this announcement has the highest priority get returned.
+ const Announcement& ann = m_announcements[txhash][peer];
+ if (ann.m_state == State::CANDIDATE && GetSelected(txhash) == peer) {
+ result.emplace_back(ann.m_sequence, txhash, ann.m_is_wtxid);
+ }
+ }
+ // Sort the results by sequence number.
+ std::sort(result.begin(), result.end());
+ std::sort(expected_expired.begin(), expected_expired.end());
+
+ // Compare with TxRequestTracker's implementation.
+ std::vector<std::pair<NodeId, GenTxid>> expired;
+ const auto actual = m_tracker.GetRequestable(peer, m_now, &expired);
+ std::sort(expired.begin(), expired.end());
+ assert(expired == expected_expired);
+
+ m_tracker.PostGetRequestableSanityCheck(m_now);
+ assert(result.size() == actual.size());
+ for (size_t pos = 0; pos < actual.size(); ++pos) {
+ assert(TXHASHES[std::get<1>(result[pos])] == actual[pos].GetHash());
+ assert(std::get<2>(result[pos]) == actual[pos].IsWtxid());
+ }
+ }
+
+ void Check()
+ {
+ // Compare CountTracked and CountLoad with naive structure.
+ size_t total = 0;
+ for (int peer = 0; peer < MAX_PEERS; ++peer) {
+ size_t tracked = 0;
+ size_t inflight = 0;
+ size_t candidates = 0;
+ for (int txhash = 0; txhash < MAX_TXHASHES; ++txhash) {
+ tracked += m_announcements[txhash][peer].m_state != State::NOTHING;
+ inflight += m_announcements[txhash][peer].m_state == State::REQUESTED;
+ candidates += m_announcements[txhash][peer].m_state == State::CANDIDATE;
+ }
+ assert(m_tracker.Count(peer) == tracked);
+ assert(m_tracker.CountInFlight(peer) == inflight);
+ assert(m_tracker.CountCandidates(peer) == candidates);
+ total += tracked;
+ }
+ // Compare Size.
+ assert(m_tracker.Size() == total);
+
+ // Invoke internal consistency check of TxRequestTracker object.
+ m_tracker.SanityCheck();
+ }
+};
+} // namespace
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ // Tester object (which encapsulates a TxRequestTracker).
+ Tester tester;
+
+ // Decode the input as a sequence of instructions with parameters
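+    // For example, the byte sequence {5, 3, 17} runs command 5 (immediate preferred inv):
+    // peer 3 announces TXHASHES[17 % MAX_TXHASHES] = TXHASHES[1] as a wtxid
+    // ((17 / MAX_TXHASHES) & 1 == 1), requestable immediately.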
+ auto it = buffer.begin();
+ while (it != buffer.end()) {
+ int cmd = *(it++) % 11;
+ int peer, txidnum, delaynum;
+ switch (cmd) {
+ case 0: // Make time jump to the next event (m_time of CANDIDATE or REQUESTED)
+ tester.AdvanceToEvent();
+ break;
+ case 1: // Change time
+ delaynum = it == buffer.end() ? 0 : *(it++);
+ tester.AdvanceTime(DELAYS[delaynum]);
+ break;
+ case 2: // Query for requestable txs
+ peer = it == buffer.end() ? 0 : *(it++) % MAX_PEERS;
+ tester.GetRequestable(peer);
+ break;
+ case 3: // Peer went offline
+ peer = it == buffer.end() ? 0 : *(it++) % MAX_PEERS;
+ tester.DisconnectedPeer(peer);
+ break;
+ case 4: // No longer need tx
+ txidnum = it == buffer.end() ? 0 : *(it++);
+ tester.ForgetTxHash(txidnum % MAX_TXHASHES);
+ break;
+ case 5: // Received immediate preferred inv
+ case 6: // Same, but non-preferred.
+ peer = it == buffer.end() ? 0 : *(it++) % MAX_PEERS;
+ txidnum = it == buffer.end() ? 0 : *(it++);
+ tester.ReceivedInv(peer, txidnum % MAX_TXHASHES, (txidnum / MAX_TXHASHES) & 1, cmd & 1,
+ std::chrono::microseconds::min());
+ break;
+ case 7: // Received delayed preferred inv
+ case 8: // Same, but non-preferred.
+ peer = it == buffer.end() ? 0 : *(it++) % MAX_PEERS;
+ txidnum = it == buffer.end() ? 0 : *(it++);
+ delaynum = it == buffer.end() ? 0 : *(it++);
+ tester.ReceivedInv(peer, txidnum % MAX_TXHASHES, (txidnum / MAX_TXHASHES) & 1, cmd & 1,
+ tester.Now() + DELAYS[delaynum]);
+ break;
+ case 9: // Requested tx from peer
+ peer = it == buffer.end() ? 0 : *(it++) % MAX_PEERS;
+ txidnum = it == buffer.end() ? 0 : *(it++);
+ delaynum = it == buffer.end() ? 0 : *(it++);
+ tester.RequestedTx(peer, txidnum % MAX_TXHASHES, tester.Now() + DELAYS[delaynum]);
+ break;
+ case 10: // Received response
+ peer = it == buffer.end() ? 0 : *(it++) % MAX_PEERS;
+ txidnum = it == buffer.end() ? 0 : *(it++);
+ tester.ReceivedResponse(peer, txidnum % MAX_TXHASHES);
+ break;
+ default:
+ assert(false);
+ }
+ }
+ tester.Check();
+}
diff --git a/src/test/limitedmap_tests.cpp b/src/test/limitedmap_tests.cpp
deleted file mode 100644
index ea18debbd3..0000000000
--- a/src/test/limitedmap_tests.cpp
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (c) 2012-2019 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include <limitedmap.h>
-
-#include <test/util/setup_common.h>
-
-#include <boost/test/unit_test.hpp>
-
-BOOST_FIXTURE_TEST_SUITE(limitedmap_tests, BasicTestingSetup)
-
-BOOST_AUTO_TEST_CASE(limitedmap_test)
-{
- // create a limitedmap capped at 10 items
- limitedmap<int, int> map(10);
-
- // check that the max size is 10
- BOOST_CHECK(map.max_size() == 10);
-
- // check that it's empty
- BOOST_CHECK(map.size() == 0);
-
- // insert (-1, -1)
- map.insert(std::pair<int, int>(-1, -1));
-
- // make sure that the size is updated
- BOOST_CHECK(map.size() == 1);
-
- // make sure that the new item is in the map
- BOOST_CHECK(map.count(-1) == 1);
-
- // insert 10 new items
- for (int i = 0; i < 10; i++) {
- map.insert(std::pair<int, int>(i, i + 1));
- }
-
- // make sure that the map now contains 10 items...
- BOOST_CHECK(map.size() == 10);
-
- // ...and that the first item has been discarded
- BOOST_CHECK(map.count(-1) == 0);
-
- // iterate over the map, both with an index and an iterator
- limitedmap<int, int>::const_iterator it = map.begin();
- for (int i = 0; i < 10; i++) {
- // make sure the item is present
- BOOST_CHECK(map.count(i) == 1);
-
- // use the iterator to check for the expected key and value
- BOOST_CHECK(it->first == i);
- BOOST_CHECK(it->second == i + 1);
-
- // use find to check for the value
- BOOST_CHECK(map.find(i)->second == i + 1);
-
- // update and recheck
- map.update(it, i + 2);
- BOOST_CHECK(map.find(i)->second == i + 2);
-
- it++;
- }
-
- // check that we've exhausted the iterator
- BOOST_CHECK(it == map.end());
-
- // resize the map to 5 items
- map.max_size(5);
-
- // check that the max size and size are now 5
- BOOST_CHECK(map.max_size() == 5);
- BOOST_CHECK(map.size() == 5);
-
- // check that items less than 5 have been discarded
- // and items greater than 5 are retained
- for (int i = 0; i < 10; i++) {
- if (i < 5) {
- BOOST_CHECK(map.count(i) == 0);
- } else {
- BOOST_CHECK(map.count(i) == 1);
- }
- }
-
- // erase some items not in the map
- for (int i = 100; i < 1000; i += 100) {
- map.erase(i);
- }
-
- // check that the size is unaffected
- BOOST_CHECK(map.size() == 5);
-
- // erase the remaining elements
- for (int i = 5; i < 10; i++) {
- map.erase(i);
- }
-
- // check that the map is now empty
- BOOST_CHECK(map.empty());
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/txrequest_tests.cpp b/src/test/txrequest_tests.cpp
new file mode 100644
index 0000000000..1d137b03b1
--- /dev/null
+++ b/src/test/txrequest_tests.cpp
@@ -0,0 +1,738 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+
+#include <txrequest.h>
+#include <uint256.h>
+
+#include <test/util/setup_common.h>
+
+#include <algorithm>
+#include <functional>
+#include <vector>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(txrequest_tests, BasicTestingSetup)
+
+namespace {
+
+constexpr std::chrono::microseconds MIN_TIME = std::chrono::microseconds::min();
+constexpr std::chrono::microseconds MAX_TIME = std::chrono::microseconds::max();
+constexpr std::chrono::microseconds MICROSECOND = std::chrono::microseconds{1};
+constexpr std::chrono::microseconds NO_TIME = std::chrono::microseconds{0};
+
+/** An Action is a function to call at a particular (simulated) timestamp. */
+using Action = std::pair<std::chrono::microseconds, std::function<void()>>;
+
+/** Object that stores actions from multiple interleaved scenarios, and data shared across them.
+ *
+ * The Scenario below is used to fill this.
+ */
+struct Runner
+{
+ /** The TxRequestTracker being tested. */
+ TxRequestTracker txrequest;
+
+ /** List of actions to be executed (in order of increasing timestamp). */
+ std::vector<Action> actions;
+
+ /** Which node ids have been assigned already (to prevent reuse). */
+ std::set<NodeId> peerset;
+
+ /** Which txhashes have been assigned already (to prevent reuse). */
+ std::set<uint256> txhashset;
+
+ /** Which (peer, gtxid) combinations are known to be expired. These need to be accumulated here instead of
+ * checked directly in the GetRequestable return value to avoid introducing a dependency between the various
+ * parallel tests. */
+ std::multiset<std::pair<NodeId, GenTxid>> expired;
+};
+
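+/** Random delays: up to ~8.4 s (2^23 us) for RandomTime8s, and up to ~1.1 years (2^45 us) for RandomTime1y. */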
+std::chrono::microseconds RandomTime8s() { return std::chrono::microseconds{1 + InsecureRandBits(23)}; }
+std::chrono::microseconds RandomTime1y() { return std::chrono::microseconds{1 + InsecureRandBits(45)}; }
+
+/** A proxy for a Runner that helps build a sequence of consecutive test actions on a TxRequestTracker.
+ *
+ * Each Scenario is a proxy through which actions for the (sequential) execution of various tests are added to a
+ * Runner. The actions from multiple scenarios are then run concurrently, resulting in these tests being performed
+ * against a TxRequestTracker in parallel. Every test has its own unique txhashes and NodeIds which are not
+ * reused in other tests, and thus they should be independent of each other. Running them in parallel, however,
+ * means that we verify the behavior (w.r.t. one test's txhashes and NodeIds) even when the state of the data
+ * structure is more complicated due to the presence of other tests.
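+ *
+ * A minimal usage sketch (mirroring TestInterleavedScenarios below):
+ *
+ *   Runner runner;
+ *   Scenario scenario(runner, std::chrono::microseconds{1});
+ *   BuildSingleTest(scenario, 0); // queue this test's actions into runner.actions
+ *   std::stable_sort(runner.actions.begin(), runner.actions.end(),
+ *                    [](const Action& a1, const Action& a2) { return a1.first < a2.first; });
+ *   for (auto& action : runner.actions) action.second();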
+ */
+class Scenario
+{
+ Runner& m_runner;
+ std::chrono::microseconds m_now;
+ std::string m_testname;
+
+public:
+ Scenario(Runner& runner, std::chrono::microseconds starttime) : m_runner(runner), m_now(starttime) {}
+
+    /** Set a name for the current test, to give clearer error messages. */
+ void SetTestName(std::string testname)
+ {
+ m_testname = std::move(testname);
+ }
+
+ /** Advance this Scenario's time; this affects the timestamps newly scheduled events get. */
+ void AdvanceTime(std::chrono::microseconds amount)
+ {
+ assert(amount.count() >= 0);
+ m_now += amount;
+ }
+
+    /** Schedule a ForgetTxHash call at the Scenario's current time. */
+ void ForgetTxHash(const uint256& txhash)
+ {
+ auto& runner = m_runner;
+ runner.actions.emplace_back(m_now, [=,&runner]() {
+ runner.txrequest.ForgetTxHash(txhash);
+ runner.txrequest.SanityCheck();
+ });
+ }
+
+    /** Schedule a ReceivedInv call at the Scenario's current time. */
+ void ReceivedInv(NodeId peer, const GenTxid& gtxid, bool pref, std::chrono::microseconds reqtime)
+ {
+ auto& runner = m_runner;
+ runner.actions.emplace_back(m_now, [=,&runner]() {
+ runner.txrequest.ReceivedInv(peer, gtxid, pref, reqtime);
+ runner.txrequest.SanityCheck();
+ });
+ }
+
+    /** Schedule a DisconnectedPeer call at the Scenario's current time. */
+ void DisconnectedPeer(NodeId peer)
+ {
+ auto& runner = m_runner;
+ runner.actions.emplace_back(m_now, [=,&runner]() {
+ runner.txrequest.DisconnectedPeer(peer);
+ runner.txrequest.SanityCheck();
+ });
+ }
+
+    /** Schedule a RequestedTx call at the Scenario's current time. */
+ void RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds exptime)
+ {
+ auto& runner = m_runner;
+ runner.actions.emplace_back(m_now, [=,&runner]() {
+ runner.txrequest.RequestedTx(peer, txhash, exptime);
+ runner.txrequest.SanityCheck();
+ });
+ }
+
+    /** Schedule a ReceivedResponse call at the Scenario's current time. */
+ void ReceivedResponse(NodeId peer, const uint256& txhash)
+ {
+ auto& runner = m_runner;
+ runner.actions.emplace_back(m_now, [=,&runner]() {
+ runner.txrequest.ReceivedResponse(peer, txhash);
+ runner.txrequest.SanityCheck();
+ });
+ }
+
+    /** Schedule calls to verify the TxRequestTracker's state at the Scenario's current time.
+ *
+ * @param peer The peer whose state will be inspected.
+ * @param expected The expected return value for GetRequestable(peer)
+     * @param candidates The expected return value of CountCandidates(peer)
+     * @param inflight The expected return value of CountInFlight(peer)
+     * @param completed The expected return value of Count(peer), minus candidates and inflight.
+     * @param checkname An arbitrary string to include in error messages, for test identification.
+     * @param offset Offset relative to the current time to use (must be <= 0). This allows simulating clocks
+     *              going backwards (but note that the ordering of this event only follows the scenario's m_now).
+ */
+ void Check(NodeId peer, const std::vector<GenTxid>& expected, size_t candidates, size_t inflight,
+ size_t completed, const std::string& checkname,
+ std::chrono::microseconds offset = std::chrono::microseconds{0})
+ {
+ const auto comment = m_testname + " " + checkname;
+ auto& runner = m_runner;
+ const auto now = m_now;
+ assert(offset.count() <= 0);
+ runner.actions.emplace_back(m_now, [=,&runner]() {
+ std::vector<std::pair<NodeId, GenTxid>> expired_now;
+ auto ret = runner.txrequest.GetRequestable(peer, now + offset, &expired_now);
+ for (const auto& entry : expired_now) runner.expired.insert(entry);
+ runner.txrequest.SanityCheck();
+ runner.txrequest.PostGetRequestableSanityCheck(now + offset);
+ size_t total = candidates + inflight + completed;
+ size_t real_total = runner.txrequest.Count(peer);
+ size_t real_candidates = runner.txrequest.CountCandidates(peer);
+ size_t real_inflight = runner.txrequest.CountInFlight(peer);
+ BOOST_CHECK_MESSAGE(real_total == total, strprintf("[" + comment + "] total %i (%i expected)", real_total, total));
+ BOOST_CHECK_MESSAGE(real_inflight == inflight, strprintf("[" + comment + "] inflight %i (%i expected)", real_inflight, inflight));
+ BOOST_CHECK_MESSAGE(real_candidates == candidates, strprintf("[" + comment + "] candidates %i (%i expected)", real_candidates, candidates));
+ BOOST_CHECK_MESSAGE(ret == expected, "[" + comment + "] mismatching requestables");
+ });
+ }
+
+ /** Verify that an announcement for gtxid by peer has expired some time before this check is scheduled.
+ *
+ * Every expected expiration should be accounted for through exactly one call to this function.
+ */
+ void CheckExpired(NodeId peer, GenTxid gtxid)
+ {
+ const auto& testname = m_testname;
+ auto& runner = m_runner;
+ runner.actions.emplace_back(m_now, [=,&runner]() {
+ auto it = runner.expired.find(std::pair<NodeId, GenTxid>{peer, gtxid});
+ BOOST_CHECK_MESSAGE(it != runner.expired.end(), "[" + testname + "] missing expiration");
+ if (it != runner.expired.end()) runner.expired.erase(it);
+ });
+ }
+
+ /** Generate a random txhash, whose priorities for certain peers are constrained.
+ *
+ * For example, NewTxHash({{p1,p2,p3},{p2,p4,p5}}) will generate a txhash T such that both:
+ * - priority(p1,T) > priority(p2,T) > priority(p3,T)
+ * - priority(p2,T) > priority(p4,T) > priority(p5,T)
+ * where priority is the predicted internal TxRequestTracker's priority, assuming all announcements
+ * are within the same preferredness class.
+ */
+ uint256 NewTxHash(const std::vector<std::vector<NodeId>>& orders = {})
+ {
+ uint256 ret;
+ bool ok;
+ do {
+ ret = InsecureRand256();
+ ok = true;
+ for (const auto& order : orders) {
+ for (size_t pos = 1; pos < order.size(); ++pos) {
+ uint64_t prio_prev = m_runner.txrequest.ComputePriority(ret, order[pos - 1], true);
+ uint64_t prio_cur = m_runner.txrequest.ComputePriority(ret, order[pos], true);
+ if (prio_prev <= prio_cur) {
+ ok = false;
+ break;
+ }
+ }
+ if (!ok) break;
+ }
+ if (ok) {
+ ok = m_runner.txhashset.insert(ret).second;
+ }
+ } while(!ok);
+ return ret;
+ }
+
+ /** Generate a random GenTxid; the txhash follows NewTxHash; the is_wtxid flag is random. */
+ GenTxid NewGTxid(const std::vector<std::vector<NodeId>>& orders = {})
+ {
+ return {InsecureRandBool(), NewTxHash(orders)};
+ }
+
+ /** Generate a new random NodeId to use as peer. The same NodeId is never returned twice
+ * (across all Scenarios combined). */
+ NodeId NewPeer()
+ {
+ bool ok;
+ NodeId ret;
+ do {
+ ret = InsecureRandBits(63);
+ ok = m_runner.peerset.insert(ret).second;
+ } while(!ok);
+ return ret;
+ }
+
+ std::chrono::microseconds Now() const { return m_now; }
+};
+
+/** Add to scenario a test with a single tx announced by a single peer.
+ *
+ * config is an integer in [0, 32), which controls which variant of the test is used.
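+ * Bit 0 selects an immediate vs delayed announcement, bit 1 its preferredness, bit 2 whether
+ * cleanup happens via DisconnectedPeer or ForgetTxHash, and bits 3-4 whether and how the
+ * transaction is requested (not at all, left outstanding, timing out, or answered).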
+ */
+void BuildSingleTest(Scenario& scenario, int config)
+{
+ auto peer = scenario.NewPeer();
+ auto gtxid = scenario.NewGTxid();
+ bool immediate = config & 1;
+ bool preferred = config & 2;
+ auto delay = immediate ? NO_TIME : RandomTime8s();
+
+ scenario.SetTestName(strprintf("Single(config=%i)", config));
+
+ // Receive an announcement, either immediately requestable or delayed.
+ scenario.ReceivedInv(peer, gtxid, preferred, immediate ? MIN_TIME : scenario.Now() + delay);
+ if (immediate) {
+ scenario.Check(peer, {gtxid}, 1, 0, 0, "s1");
+ } else {
+ scenario.Check(peer, {}, 1, 0, 0, "s2");
+ scenario.AdvanceTime(delay - MICROSECOND);
+ scenario.Check(peer, {}, 1, 0, 0, "s3");
+ scenario.AdvanceTime(MICROSECOND);
+ scenario.Check(peer, {gtxid}, 1, 0, 0, "s4");
+ }
+
+ if (config >> 3) { // We'll request the transaction
+ scenario.AdvanceTime(RandomTime8s());
+ auto expiry = RandomTime8s();
+ scenario.Check(peer, {gtxid}, 1, 0, 0, "s5");
+ scenario.RequestedTx(peer, gtxid.GetHash(), scenario.Now() + expiry);
+ scenario.Check(peer, {}, 0, 1, 0, "s6");
+
+ if ((config >> 3) == 1) { // The request will time out
+ scenario.AdvanceTime(expiry - MICROSECOND);
+ scenario.Check(peer, {}, 0, 1, 0, "s7");
+ scenario.AdvanceTime(MICROSECOND);
+ scenario.Check(peer, {}, 0, 0, 0, "s8");
+ scenario.CheckExpired(peer, gtxid);
+ return;
+ } else {
+ scenario.AdvanceTime(std::chrono::microseconds{InsecureRandRange(expiry.count())});
+ scenario.Check(peer, {}, 0, 1, 0, "s9");
+ if ((config >> 3) == 3) { // A response will arrive for the transaction
+ scenario.ReceivedResponse(peer, gtxid.GetHash());
+ scenario.Check(peer, {}, 0, 0, 0, "s10");
+ return;
+ }
+ }
+ }
+
+ if (config & 4) { // The peer will go offline
+ scenario.DisconnectedPeer(peer);
+ } else { // The transaction is no longer needed
+ scenario.ForgetTxHash(gtxid.GetHash());
+ }
+ scenario.Check(peer, {}, 0, 0, 0, "s11");
+}
+
+/** Add to scenario a test with a single tx announced by two peers, to verify the
+ * right peer is selected for requests.
+ *
+ * config is an integer in [0, 32), which controls which variant of the test is used.
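+ * Bit 0 determines which peer's priority ranks higher, bits 1-2 the two peers' preferredness,
+ * bit 3 whether the transaction is requested from the selected peer, and bit 4 whether that
+ * peer then disconnects or responds with a NOTFOUND.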
+ */
+void BuildPriorityTest(Scenario& scenario, int config)
+{
+ scenario.SetTestName(strprintf("Priority(config=%i)", config));
+
+ // Two peers. They will announce in order {peer1, peer2}.
+ auto peer1 = scenario.NewPeer(), peer2 = scenario.NewPeer();
+ // Construct a transaction that under random rules would be preferred by peer2 or peer1,
+ // depending on configuration.
+ bool prio1 = config & 1;
+ auto gtxid = prio1 ? scenario.NewGTxid({{peer1, peer2}}) : scenario.NewGTxid({{peer2, peer1}});
+ bool pref1 = config & 2, pref2 = config & 4;
+
+ scenario.ReceivedInv(peer1, gtxid, pref1, MIN_TIME);
+ scenario.Check(peer1, {gtxid}, 1, 0, 0, "p1");
+ if (InsecureRandBool()) {
+ scenario.AdvanceTime(RandomTime8s());
+ scenario.Check(peer1, {gtxid}, 1, 0, 0, "p2");
+ }
+
+ scenario.ReceivedInv(peer2, gtxid, pref2, MIN_TIME);
+ bool stage2_prio =
+ // At this point, peer2 will be given priority if:
+ // - It is preferred and peer1 is not
+ (pref2 && !pref1) ||
+ // - They're in the same preference class,
+ // and the randomized priority favors peer2 over peer1.
+ (pref1 == pref2 && !prio1);
+ NodeId priopeer = stage2_prio ? peer2 : peer1, otherpeer = stage2_prio ? peer1 : peer2;
+ scenario.Check(otherpeer, {}, 1, 0, 0, "p3");
+ scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p4");
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ scenario.Check(otherpeer, {}, 1, 0, 0, "p5");
+ scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p6");
+
+ // We possibly request from the selected peer.
+ if (config & 8) {
+ scenario.RequestedTx(priopeer, gtxid.GetHash(), MAX_TIME);
+ scenario.Check(priopeer, {}, 0, 1, 0, "p7");
+ scenario.Check(otherpeer, {}, 1, 0, 0, "p8");
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ }
+
+ // The peer which was selected (or requested from) now goes offline, or a NOTFOUND is received from them.
+ if (config & 16) {
+ scenario.DisconnectedPeer(priopeer);
+ } else {
+ scenario.ReceivedResponse(priopeer, gtxid.GetHash());
+ }
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+    scenario.Check(priopeer, {}, 0, 0, !(config & 16), "p8b");
+ scenario.Check(otherpeer, {gtxid}, 1, 0, 0, "p9");
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+
+ // Now the other peer goes offline.
+ scenario.DisconnectedPeer(otherpeer);
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ scenario.Check(peer1, {}, 0, 0, 0, "p10");
+ scenario.Check(peer2, {}, 0, 0, 0, "p11");
+}
+
+/** Add to scenario a randomized test in which N peers announce the same transaction, to verify
+ * the order in which they are requested. */
+void BuildBigPriorityTest(Scenario& scenario, int peers)
+{
+ scenario.SetTestName(strprintf("BigPriority(peers=%i)", peers));
+
+ // We will have N peers announce the same transaction.
+ std::map<NodeId, bool> preferred;
+ std::vector<NodeId> pref_peers, npref_peers;
+    int num_pref = InsecureRandRange(peers + 1); // Some preferred, ...
+ int num_npref = peers - num_pref; // some not preferred.
+ for (int i = 0; i < num_pref; ++i) {
+ pref_peers.push_back(scenario.NewPeer());
+ preferred[pref_peers.back()] = true;
+ }
+ for (int i = 0; i < num_npref; ++i) {
+ npref_peers.push_back(scenario.NewPeer());
+ preferred[npref_peers.back()] = false;
+ }
+ // Make a list of all peers, in order of intended request order (concatenation of pref_peers and npref_peers).
+ std::vector<NodeId> request_order;
+ for (int i = 0; i < num_pref; ++i) request_order.push_back(pref_peers[i]);
+ for (int i = 0; i < num_npref; ++i) request_order.push_back(npref_peers[i]);
+
+ // Determine the announcement order randomly.
+ std::vector<NodeId> announce_order = request_order;
+ Shuffle(announce_order.begin(), announce_order.end(), g_insecure_rand_ctx);
+
+ // Find a gtxid whose txhash prioritization is consistent with the required ordering within pref_peers and
+ // within npref_peers.
+ auto gtxid = scenario.NewGTxid({pref_peers, npref_peers});
+
+    // Decide reqtimes in opposite order of the expected request order. This means that, as time passes,
+    // the to-be-requested-from peer is expected to change every time a subsequent reqtime is reached.
+ std::map<NodeId, std::chrono::microseconds> reqtimes;
+ auto reqtime = scenario.Now();
+ for (int i = peers - 1; i >= 0; --i) {
+ reqtime += RandomTime8s();
+ reqtimes[request_order[i]] = reqtime;
+ }
+
+ // Actually announce from all peers simultaneously (but in announce_order).
+ for (const auto peer : announce_order) {
+ scenario.ReceivedInv(peer, gtxid, preferred[peer], reqtimes[peer]);
+ }
+ for (const auto peer : announce_order) {
+ scenario.Check(peer, {}, 1, 0, 0, "b1");
+ }
+
+ // Let time pass and observe the to-be-requested-from peer change, from nonpreferred to preferred, and from
+ // high priority to low priority within each class.
+ for (int i = peers - 1; i >= 0; --i) {
+ scenario.AdvanceTime(reqtimes[request_order[i]] - scenario.Now() - MICROSECOND);
+ scenario.Check(request_order[i], {}, 1, 0, 0, "b2");
+ scenario.AdvanceTime(MICROSECOND);
+ scenario.Check(request_order[i], {gtxid}, 1, 0, 0, "b3");
+ }
+
+ // Peers now in random order go offline, or send NOTFOUNDs. At every point in time the new to-be-requested-from
+ // peer should be the best remaining one, so verify this after every response.
+ for (int i = 0; i < peers; ++i) {
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ const int pos = InsecureRandRange(request_order.size());
+ const auto peer = request_order[pos];
+ request_order.erase(request_order.begin() + pos);
+ if (InsecureRandBool()) {
+ scenario.DisconnectedPeer(peer);
+ scenario.Check(peer, {}, 0, 0, 0, "b4");
+ } else {
+ scenario.ReceivedResponse(peer, gtxid.GetHash());
+ scenario.Check(peer, {}, 0, 0, request_order.size() > 0, "b5");
+ }
+ if (request_order.size()) {
+ scenario.Check(request_order[0], {gtxid}, 1, 0, 0, "b6");
+ }
+ }
+
+ // Everything is gone in the end.
+ for (const auto peer : announce_order) {
+ scenario.Check(peer, {}, 0, 0, 0, "b7");
+ }
+}
+
+/** Add to scenario a test with one peer announcing two transactions, to verify they are
+ * fetched in announcement order.
+ *
+ * config is an integer in [0, 4), and selects the variant of the test.
+ */
+void BuildRequestOrderTest(Scenario& scenario, int config)
+{
+ scenario.SetTestName(strprintf("RequestOrder(config=%i)", config));
+
+ auto peer = scenario.NewPeer();
+ auto gtxid1 = scenario.NewGTxid();
+ auto gtxid2 = scenario.NewGTxid();
+
+ auto reqtime2 = scenario.Now() + RandomTime8s();
+ auto reqtime1 = reqtime2 + RandomTime8s();
+
+ scenario.ReceivedInv(peer, gtxid1, config & 1, reqtime1);
+ // Simulate time going backwards by giving the second announcement an earlier reqtime.
+ scenario.ReceivedInv(peer, gtxid2, config & 2, reqtime2);
+
+ scenario.AdvanceTime(reqtime2 - MICROSECOND - scenario.Now());
+ scenario.Check(peer, {}, 2, 0, 0, "o1");
+ scenario.AdvanceTime(MICROSECOND);
+ scenario.Check(peer, {gtxid2}, 2, 0, 0, "o2");
+ scenario.AdvanceTime(reqtime1 - MICROSECOND - scenario.Now());
+ scenario.Check(peer, {gtxid2}, 2, 0, 0, "o3");
+ scenario.AdvanceTime(MICROSECOND);
+ // Even with time going backwards in between announcements, the return value of GetRequestable is in
+ // announcement order.
+ scenario.Check(peer, {gtxid1, gtxid2}, 2, 0, 0, "o4");
+
+ scenario.DisconnectedPeer(peer);
+ scenario.Check(peer, {}, 0, 0, 0, "o5");
+}
+
+/** Add to scenario a test that verifies behavior related to both txid and wtxid with the same
+ * hash being announced.
+ *
+ * config is an integer in [0, 4), and selects the variant of the test used.
+*/
+void BuildWtxidTest(Scenario& scenario, int config)
+{
+ scenario.SetTestName(strprintf("Wtxid(config=%i)", config));
+
+ auto peerT = scenario.NewPeer();
+ auto peerW = scenario.NewPeer();
+ auto txhash = scenario.NewTxHash();
+ GenTxid txid{false, txhash};
+ GenTxid wtxid{true, txhash};
+
+ auto reqtimeT = InsecureRandBool() ? MIN_TIME : scenario.Now() + RandomTime8s();
+ auto reqtimeW = InsecureRandBool() ? MIN_TIME : scenario.Now() + RandomTime8s();
+
+ // Announce txid first or wtxid first.
+ if (config & 1) {
+ scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT);
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW);
+ } else {
+ scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW);
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT);
+ }
+
+ // Let time pass if needed, and check that the preferred announcement (txid or wtxid)
+ // is correctly to-be-requested (and with the correct wtxidness).
+ auto max_reqtime = std::max(reqtimeT, reqtimeW);
+ if (max_reqtime > scenario.Now()) scenario.AdvanceTime(max_reqtime - scenario.Now());
+ if (config & 2) {
+ scenario.Check(peerT, {txid}, 1, 0, 0, "w1");
+ scenario.Check(peerW, {}, 1, 0, 0, "w2");
+ } else {
+ scenario.Check(peerT, {}, 1, 0, 0, "w3");
+ scenario.Check(peerW, {wtxid}, 1, 0, 0, "w4");
+ }
+
+ // Let the preferred announcement be requested. It's not going to be delivered.
+ auto expiry = RandomTime8s();
+ if (config & 2) {
+ scenario.RequestedTx(peerT, txid.GetHash(), scenario.Now() + expiry);
+ scenario.Check(peerT, {}, 0, 1, 0, "w5");
+ scenario.Check(peerW, {}, 1, 0, 0, "w6");
+ } else {
+ scenario.RequestedTx(peerW, wtxid.GetHash(), scenario.Now() + expiry);
+ scenario.Check(peerT, {}, 1, 0, 0, "w7");
+ scenario.Check(peerW, {}, 0, 1, 0, "w8");
+ }
+
+ // After reaching expiration time of the preferred announcement, verify that the
+    // remaining one is requestable.
+ scenario.AdvanceTime(expiry);
+ if (config & 2) {
+ scenario.Check(peerT, {}, 0, 0, 1, "w9");
+ scenario.Check(peerW, {wtxid}, 1, 0, 0, "w10");
+ scenario.CheckExpired(peerT, txid);
+ } else {
+ scenario.Check(peerT, {txid}, 1, 0, 0, "w11");
+ scenario.Check(peerW, {}, 0, 0, 1, "w12");
+ scenario.CheckExpired(peerW, wtxid);
+ }
+
+ // If a good transaction with either that hash as wtxid or txid arrives, both
+ // announcements are gone.
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ scenario.ForgetTxHash(txhash);
+ scenario.Check(peerT, {}, 0, 0, 0, "w13");
+ scenario.Check(peerW, {}, 0, 0, 0, "w14");
+}
+
+/** Add to scenario a test that exercises clocks that go backwards. */
+void BuildTimeBackwardsTest(Scenario& scenario)
+{
+ auto peer1 = scenario.NewPeer();
+ auto peer2 = scenario.NewPeer();
+ auto gtxid = scenario.NewGTxid({{peer1, peer2}});
+
+ // Announce from peer2.
+ auto reqtime = scenario.Now() + RandomTime8s();
+ scenario.ReceivedInv(peer2, gtxid, true, reqtime);
+ scenario.Check(peer2, {}, 1, 0, 0, "r1");
+ scenario.AdvanceTime(reqtime - scenario.Now());
+ scenario.Check(peer2, {gtxid}, 1, 0, 0, "r2");
+ // Check that if the clock goes backwards by 1us, the transaction would stop being requested.
+ scenario.Check(peer2, {}, 1, 0, 0, "r3", -MICROSECOND);
+ // But it reverts to being requested if time goes forward again.
+ scenario.Check(peer2, {gtxid}, 1, 0, 0, "r4");
+
+ // Announce from peer1.
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ scenario.ReceivedInv(peer1, gtxid, true, MAX_TIME);
+ scenario.Check(peer2, {gtxid}, 1, 0, 0, "r5");
+ scenario.Check(peer1, {}, 1, 0, 0, "r6");
+
+ // Request from peer1.
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ auto expiry = scenario.Now() + RandomTime8s();
+ scenario.RequestedTx(peer1, gtxid.GetHash(), expiry);
+ scenario.Check(peer1, {}, 0, 1, 0, "r7");
+ scenario.Check(peer2, {}, 1, 0, 0, "r8");
+
+ // Expiration passes.
+ scenario.AdvanceTime(expiry - scenario.Now());
+ scenario.Check(peer1, {}, 0, 0, 1, "r9");
+ scenario.Check(peer2, {gtxid}, 1, 0, 0, "r10"); // Request goes back to peer2.
+ scenario.CheckExpired(peer1, gtxid);
+ scenario.Check(peer1, {}, 0, 0, 1, "r11", -MICROSECOND); // Going back does not unexpire.
+ scenario.Check(peer2, {gtxid}, 1, 0, 0, "r12", -MICROSECOND);
+
+ // Peer2 goes offline, meaning no viable announcements remain.
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ scenario.DisconnectedPeer(peer2);
+ scenario.Check(peer1, {}, 0, 0, 0, "r13");
+ scenario.Check(peer2, {}, 0, 0, 0, "r14");
+}
+
+/** Add to scenario a test that involves RequestedTx() calls for txhashes not returned by GetRequestable. */
+void BuildWeirdRequestsTest(Scenario& scenario)
+{
+ auto peer1 = scenario.NewPeer();
+ auto peer2 = scenario.NewPeer();
+ auto gtxid1 = scenario.NewGTxid({{peer1, peer2}});
+ auto gtxid2 = scenario.NewGTxid({{peer2, peer1}});
+
+ // Announce gtxid1 by peer1.
+ scenario.ReceivedInv(peer1, gtxid1, true, MIN_TIME);
+ scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q1");
+
+ // Announce gtxid2 by peer2.
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ scenario.ReceivedInv(peer2, gtxid2, true, MIN_TIME);
+ scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q2");
+ scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q3");
+
+ // We request gtxid2 from *peer1* - no effect.
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ scenario.RequestedTx(peer1, gtxid2.GetHash(), MAX_TIME);
+ scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q4");
+ scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q5");
+
+ // Now request gtxid1 from peer1 - marks it as REQUESTED.
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ auto expiryA = scenario.Now() + RandomTime8s();
+ scenario.RequestedTx(peer1, gtxid1.GetHash(), expiryA);
+ scenario.Check(peer1, {}, 0, 1, 0, "q6");
+ scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q7");
+
+ // Request it a second time - nothing happens, as it's already REQUESTED.
+ auto expiryB = expiryA + RandomTime8s();
+ scenario.RequestedTx(peer1, gtxid1.GetHash(), expiryB);
+ scenario.Check(peer1, {}, 0, 1, 0, "q8");
+ scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q9");
+
+ // Also announce gtxid1 from peer2 now, so that the txhash isn't forgotten when the peer1 request expires.
+ scenario.ReceivedInv(peer2, gtxid1, true, MIN_TIME);
+ scenario.Check(peer1, {}, 0, 1, 0, "q10");
+ scenario.Check(peer2, {gtxid2}, 2, 0, 0, "q11");
+
+ // When reaching expiryA, it expires (not expiryB, which is later).
+ scenario.AdvanceTime(expiryA - scenario.Now());
+ scenario.Check(peer1, {}, 0, 0, 1, "q12");
+ scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q13");
+ scenario.CheckExpired(peer1, gtxid1);
+
+ // Requesting it yet again from peer1 doesn't do anything, as it's already COMPLETED.
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ scenario.RequestedTx(peer1, gtxid1.GetHash(), MAX_TIME);
+ scenario.Check(peer1, {}, 0, 0, 1, "q14");
+ scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q15");
+
+ // Now announce gtxid2 from peer1.
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ scenario.ReceivedInv(peer1, gtxid2, true, MIN_TIME);
+ scenario.Check(peer1, {}, 1, 0, 1, "q16");
+ scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q17");
+
+    // And request it from peer1 (weird, as peer2 has the preference).
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ scenario.RequestedTx(peer1, gtxid2.GetHash(), MAX_TIME);
+ scenario.Check(peer1, {}, 0, 1, 1, "q18");
+ scenario.Check(peer2, {gtxid1}, 2, 0, 0, "q19");
+
+ // If peer2 now (normally) requests gtxid2, the existing request by peer1 becomes COMPLETED.
+ if (InsecureRandBool()) scenario.AdvanceTime(RandomTime8s());
+ scenario.RequestedTx(peer2, gtxid2.GetHash(), MAX_TIME);
+ scenario.Check(peer1, {}, 0, 0, 2, "q20");
+ scenario.Check(peer2, {gtxid1}, 1, 1, 0, "q21");
+
+ // If peer2 goes offline, no viable announcements remain.
+ scenario.DisconnectedPeer(peer2);
+ scenario.Check(peer1, {}, 0, 0, 0, "q22");
+ scenario.Check(peer2, {}, 0, 0, 0, "q23");
+}
+
+void TestInterleavedScenarios()
+{
+ // Create a list of functions which add tests to scenarios.
+ std::vector<std::function<void(Scenario&)>> builders;
+ // Add instances of every test, for every configuration.
+ for (int n = 0; n < 64; ++n) {
+ builders.emplace_back([n](Scenario& scenario){ BuildWtxidTest(scenario, n); });
+ builders.emplace_back([n](Scenario& scenario){ BuildRequestOrderTest(scenario, n & 3); });
+ builders.emplace_back([n](Scenario& scenario){ BuildSingleTest(scenario, n & 31); });
+ builders.emplace_back([n](Scenario& scenario){ BuildPriorityTest(scenario, n & 31); });
+ builders.emplace_back([n](Scenario& scenario){ BuildBigPriorityTest(scenario, (n & 7) + 1); });
+ builders.emplace_back([](Scenario& scenario){ BuildTimeBackwardsTest(scenario); });
+ builders.emplace_back([](Scenario& scenario){ BuildWeirdRequestsTest(scenario); });
+ }
+ // Randomly shuffle all those functions.
+ Shuffle(builders.begin(), builders.end(), g_insecure_rand_ctx);
+
+ Runner runner;
+ auto starttime = RandomTime1y();
+ // Construct many scenarios, and run (up to) 10 randomly-chosen tests consecutively in each.
+ while (builders.size()) {
+ // Introduce some variation in the start time of each scenario, so they don't all start off
+ // concurrently, but get a more random interleaving.
+ auto scenario_start = starttime + RandomTime8s() + RandomTime8s() + RandomTime8s();
+ Scenario scenario(runner, scenario_start);
+ for (int j = 0; builders.size() && j < 10; ++j) {
+ builders.back()(scenario);
+ builders.pop_back();
+ }
+ }
+    // Sort all the actions from all those scenarios chronologically, so that the actions from
+    // distinct scenarios become interleaved. Use stable_sort so that actions from one scenario
+ // aren't reordered w.r.t. each other.
+ std::stable_sort(runner.actions.begin(), runner.actions.end(), [](const Action& a1, const Action& a2) {
+ return a1.first < a2.first;
+ });
+
+ // Run all actions from all scenarios, in order.
+ for (auto& action : runner.actions) {
+ action.second();
+ }
+
+ BOOST_CHECK_EQUAL(runner.txrequest.Size(), 0U);
+ BOOST_CHECK(runner.expired.empty());
+}
+
+} // namespace
+
+BOOST_AUTO_TEST_CASE(TxRequestTest)
+{
+ for (int i = 0; i < 5; ++i) {
+ TestInterleavedScenarios();
+ }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/txrequest.cpp b/src/txrequest.cpp
new file mode 100644
index 0000000000..494786c201
--- /dev/null
+++ b/src/txrequest.cpp
@@ -0,0 +1,748 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <txrequest.h>
+
+#include <crypto/siphash.h>
+#include <net.h>
+#include <primitives/transaction.h>
+#include <random.h>
+#include <uint256.h>
+#include <util/memory.h>
+
+#include <boost/multi_index_container.hpp>
+#include <boost/multi_index/ordered_index.hpp>
+
+#include <chrono>
+#include <unordered_map>
+#include <utility>
+
+#include <assert.h>
+
+namespace {
+
+/** The various states a (txhash,peer) pair can be in.
+ *
+ * Note that CANDIDATE is split up into 3 substates (DELAYED, READY, BEST), allowing a more efficient implementation.
+ * Also note that the sorting order of ByTxHashView relies on the specific order of values in this enum.
+ *
+ * Expected behaviour is:
+ * - When first announced by a peer, the state is CANDIDATE_DELAYED until reqtime is reached.
+ * - Announcements that have reached their reqtime but not been requested will be either CANDIDATE_READY or
+ * CANDIDATE_BEST. Neither of those has an expiration time; they remain in that state until they're requested or
+ * no longer needed. CANDIDATE_READY announcements are promoted to CANDIDATE_BEST when they're the best one left.
+ * - When requested, an announcement will be in state REQUESTED until expiry is reached.
+ * - If expiry is reached, or the peer replies to the request (either with NOTFOUND or the tx), the state becomes
+ * COMPLETED.
+ */
+enum class State : uint8_t {
+ /** A CANDIDATE announcement whose reqtime is in the future. */
+ CANDIDATE_DELAYED,
+ /** A CANDIDATE announcement that's not CANDIDATE_DELAYED or CANDIDATE_BEST. */
+ CANDIDATE_READY,
+ /** The best CANDIDATE for a given txhash; only if there is no REQUESTED announcement already for that txhash.
+ * The CANDIDATE_BEST is the highest-priority announcement among all CANDIDATE_READY (and _BEST) ones for that
+ * txhash. */
+ CANDIDATE_BEST,
+ /** A REQUESTED announcement. */
+ REQUESTED,
+ /** A COMPLETED announcement. */
+ COMPLETED,
+};
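+
+// Illustrative summary of the transitions described above (a sketch, not exhaustive; demotions
+// due to time going backwards are omitted):
+//
+//   CANDIDATE_DELAYED --(reqtime passes)--> CANDIDATE_READY <--> CANDIDATE_BEST
+//   CANDIDATE_BEST --(RequestedTx)--> REQUESTED --(expiry passes, or response)--> COMPLETED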
+
+//! Type alias for sequence numbers.
+using SequenceNumber = uint64_t;
+
+/** An announcement. This is the data we track for each txid or wtxid that is announced to us by each peer. */
+struct Announcement {
+ /** Txid or wtxid that was announced. */
+ const uint256 m_txhash;
+ /** For CANDIDATE_{DELAYED,BEST,READY} the reqtime; for REQUESTED the expiry. */
+ std::chrono::microseconds m_time;
+ /** What peer the request was from. */
+ const NodeId m_peer;
+ /** What sequence number this announcement has. */
+ const SequenceNumber m_sequence : 59;
+ /** Whether the request is preferred. */
+ const bool m_preferred : 1;
+ /** Whether this is a wtxid request. */
+ const bool m_is_wtxid : 1;
+
+ /** What state this announcement is in. */
+ State m_state : 3;
+
+ /** Whether this announcement is selected. There can be at most 1 selected peer per txhash. */
+ bool IsSelected() const
+ {
+ return m_state == State::CANDIDATE_BEST || m_state == State::REQUESTED;
+ }
+
+ /** Whether this announcement is waiting for a certain time to pass. */
+ bool IsWaiting() const
+ {
+ return m_state == State::REQUESTED || m_state == State::CANDIDATE_DELAYED;
+ }
+
+ /** Whether this announcement can feasibly be selected if the current IsSelected() one disappears. */
+ bool IsSelectable() const
+ {
+ return m_state == State::CANDIDATE_READY || m_state == State::CANDIDATE_BEST;
+ }
+
+ /** Construct a new announcement from scratch, initially in CANDIDATE_DELAYED state. */
+ Announcement(const GenTxid& gtxid, NodeId peer, bool preferred, std::chrono::microseconds reqtime,
+ SequenceNumber sequence) :
+ m_txhash(gtxid.GetHash()), m_time(reqtime), m_peer(peer), m_sequence(sequence), m_preferred(preferred),
+ m_is_wtxid(gtxid.IsWtxid()), m_state(State::CANDIDATE_DELAYED) {}
+};
+
+//! Type alias for priorities.
+using Priority = uint64_t;
+
+/** A functor with embedded salt that computes priority of an announcement.
+ *
+ * Higher priorities are selected first.
+ */
+class PriorityComputer {
+ const uint64_t m_k0, m_k1;
+public:
+ explicit PriorityComputer(bool deterministic) :
+ m_k0{deterministic ? 0 : GetRand(0xFFFFFFFFFFFFFFFF)},
+ m_k1{deterministic ? 0 : GetRand(0xFFFFFFFFFFFFFFFF)} {}
+
+ Priority operator()(const uint256& txhash, NodeId peer, bool preferred) const
+ {
+ uint64_t low_bits = CSipHasher(m_k0, m_k1).Write(txhash.begin(), txhash.size()).Write(peer).Finalize() >> 1;
+ return low_bits | uint64_t{preferred} << 63;
+ }
+
+ Priority operator()(const Announcement& ann) const
+ {
+ return operator()(ann.m_txhash, ann.m_peer, ann.m_preferred);
+ }
+};
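+
+// A minimal usage sketch (illustration only; 'txhash' and 'peer' stand for caller-supplied values):
+//
+//     PriorityComputer computer{/*deterministic=*/true};
+//     Priority prio = computer(txhash, peer, /*preferred=*/true);
+//
+// As the preferred flag occupies the top bit, every preferred announcement outranks every
+// non-preferred one; the salted SipHash of (txhash, peer) breaks the remaining ties.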
+
+// Definitions for the 3 indexes used in the main data structure.
+//
+// Each index has a By* type to identify it, a By*View data type to represent the view of an announcement it is
+// sorted by, and a By*ViewExtractor type to convert an announcement into the By*View type.
+// See https://www.boost.org/doc/libs/1_58_0/libs/multi_index/doc/reference/key_extraction.html#key_extractors
+// for more information about the key extraction concept.
+
+// The ByPeer index is sorted by (peer, state == CANDIDATE_BEST, txhash)
+//
+// Uses:
+// * Looking up existing announcements by peer/txhash, by checking both (peer, false, txhash) and
+// (peer, true, txhash).
+// * Finding all CANDIDATE_BEST announcements for a given peer in GetRequestable.
+struct ByPeer {};
+using ByPeerView = std::tuple<NodeId, bool, const uint256&>;
+struct ByPeerViewExtractor
+{
+ using result_type = ByPeerView;
+ result_type operator()(const Announcement& ann) const
+ {
+ return ByPeerView{ann.m_peer, ann.m_state == State::CANDIDATE_BEST, ann.m_txhash};
+ }
+};
+
+// The ByTxHash index is sorted by (txhash, state, priority).
+//
+// Note: priority == 0 whenever state != CANDIDATE_READY.
+//
+// Uses:
+// * Deleting all announcements with a given txhash in ForgetTxHash.
+// * Finding the best CANDIDATE_READY to convert to CANDIDATE_BEST, when no other CANDIDATE_READY or REQUESTED
+// announcement exists for that txhash.
+// * Determining when no more non-COMPLETED announcements for a given txhash exist, so the COMPLETED ones can be
+// deleted.
+struct ByTxHash {};
+using ByTxHashView = std::tuple<const uint256&, State, Priority>;
+class ByTxHashViewExtractor {
+ const PriorityComputer& m_computer;
+public:
+ ByTxHashViewExtractor(const PriorityComputer& computer) : m_computer(computer) {}
+ using result_type = ByTxHashView;
+ result_type operator()(const Announcement& ann) const
+ {
+ const Priority prio = (ann.m_state == State::CANDIDATE_READY) ? m_computer(ann) : 0;
+ return ByTxHashView{ann.m_txhash, ann.m_state, prio};
+ }
+};
+
+enum class WaitState {
+ //! Used for announcements that need efficient testing of "is their timestamp in the future?".
+ FUTURE_EVENT,
+ //! Used for announcements whose timestamp is not relevant.
+ NO_EVENT,
+ //! Used for announcements that need efficient testing of "is their timestamp in the past?".
+ PAST_EVENT,
+};
+
+WaitState GetWaitState(const Announcement& ann)
+{
+ if (ann.IsWaiting()) return WaitState::FUTURE_EVENT;
+ if (ann.IsSelectable()) return WaitState::PAST_EVENT;
+ return WaitState::NO_EVENT;
+}
+
+// The ByTime index is sorted by (wait_state, time).
+//
+// All announcements with a timestamp in the future can be found by iterating the index forward from the beginning.
+// All announcements with a timestamp in the past can be found by iterating the index backwards from the end.
+//
+// Uses:
+// * Finding CANDIDATE_DELAYED announcements whose reqtime has passed, and REQUESTED announcements whose expiry has
+// passed.
+// * Finding CANDIDATE_READY/BEST announcements whose reqtime is in the future (when the clock time went backwards).
+struct ByTime {};
+using ByTimeView = std::pair<WaitState, std::chrono::microseconds>;
+struct ByTimeViewExtractor
+{
+ using result_type = ByTimeView;
+ result_type operator()(const Announcement& ann) const
+ {
+ return ByTimeView{GetWaitState(ann), ann.m_time};
+ }
+};
+
+/** Data type for the main data structure (Announcement objects with ByPeer/ByTxHash/ByTime indexes). */
+using Index = boost::multi_index_container<
+ Announcement,
+ boost::multi_index::indexed_by<
+ boost::multi_index::ordered_unique<boost::multi_index::tag<ByPeer>, ByPeerViewExtractor>,
+ boost::multi_index::ordered_non_unique<boost::multi_index::tag<ByTxHash>, ByTxHashViewExtractor>,
+ boost::multi_index::ordered_non_unique<boost::multi_index::tag<ByTime>, ByTimeViewExtractor>
+ >
+>;
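+
+// For example (a sketch of how the indexes are used by the member functions below): all
+// announcements from one peer can be enumerated through the ByPeer index,
+//
+//     auto& index = m_index.get<ByPeer>();
+//     auto it = index.lower_bound(ByPeerView{peer, false, uint256::ZERO});
+//     while (it != index.end() && it->m_peer == peer) { /* ... */ ++it; }
+//
+// while SetTimePoint walks the ByTime index from its beginning (and end) to find announcements
+// whose reqtime or expiry has passed (or unexpectedly lies in the future).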
+
+/** Helper type to simplify syntax of iterator types. */
+template<typename Tag>
+using Iter = typename Index::index<Tag>::type::iterator;
+
+/** Per-peer statistics object. */
+struct PeerInfo {
+ size_t m_total = 0; //!< Total number of announcements for this peer.
+ size_t m_completed = 0; //!< Number of COMPLETED announcements for this peer.
+ size_t m_requested = 0; //!< Number of REQUESTED announcements for this peer.
+};
+
+/** Per-txhash statistics object. Only used for sanity checking. */
+struct TxHashInfo
+{
+ //! Number of CANDIDATE_DELAYED announcements for this txhash.
+ size_t m_candidate_delayed = 0;
+ //! Number of CANDIDATE_READY announcements for this txhash.
+ size_t m_candidate_ready = 0;
+ //! Number of CANDIDATE_BEST announcements for this txhash (at most one).
+ size_t m_candidate_best = 0;
+ //! Number of REQUESTED announcements for this txhash (at most one; mutually exclusive with CANDIDATE_BEST).
+ size_t m_requested = 0;
+ //! The priority of the CANDIDATE_BEST announcement if one exists, or max() otherwise.
+ Priority m_priority_candidate_best = std::numeric_limits<Priority>::max();
+ //! The highest priority of all CANDIDATE_READY announcements (or min() if none exist).
+ Priority m_priority_best_candidate_ready = std::numeric_limits<Priority>::min();
+ //! All peers we have an announcement for this txhash for.
+ std::vector<NodeId> m_peers;
+};
+
+/** Compare two PeerInfo objects. Only used for sanity checking. */
+bool operator==(const PeerInfo& a, const PeerInfo& b)
+{
+ return std::tie(a.m_total, a.m_completed, a.m_requested) ==
+ std::tie(b.m_total, b.m_completed, b.m_requested);
+}
+
+/** (Re)compute the PeerInfo map from the index. Only used for sanity checking. */
+std::unordered_map<NodeId, PeerInfo> RecomputePeerInfo(const Index& index)
+{
+ std::unordered_map<NodeId, PeerInfo> ret;
+ for (const Announcement& ann : index) {
+ PeerInfo& info = ret[ann.m_peer];
+ ++info.m_total;
+ info.m_requested += (ann.m_state == State::REQUESTED);
+ info.m_completed += (ann.m_state == State::COMPLETED);
+ }
+ return ret;
+}
+
+/** Compute the TxHashInfo map. Only used for sanity checking. */
+std::map<uint256, TxHashInfo> ComputeTxHashInfo(const Index& index, const PriorityComputer& computer)
+{
+ std::map<uint256, TxHashInfo> ret;
+ for (const Announcement& ann : index) {
+ TxHashInfo& info = ret[ann.m_txhash];
+ // Classify how many announcements of each state we have for this txhash.
+ info.m_candidate_delayed += (ann.m_state == State::CANDIDATE_DELAYED);
+ info.m_candidate_ready += (ann.m_state == State::CANDIDATE_READY);
+ info.m_candidate_best += (ann.m_state == State::CANDIDATE_BEST);
+ info.m_requested += (ann.m_state == State::REQUESTED);
+ // And track the priority of the best CANDIDATE_READY/CANDIDATE_BEST announcements.
+ if (ann.m_state == State::CANDIDATE_BEST) {
+ info.m_priority_candidate_best = computer(ann);
+ }
+ if (ann.m_state == State::CANDIDATE_READY) {
+ info.m_priority_best_candidate_ready = std::max(info.m_priority_best_candidate_ready, computer(ann));
+ }
+ // Also keep track of which peers this txhash has an announcement for (so we can detect duplicates).
+ info.m_peers.push_back(ann.m_peer);
+ }
+ return ret;
+}
+
+GenTxid ToGenTxid(const Announcement& ann)
+{
+ return {ann.m_is_wtxid, ann.m_txhash};
+}
+
+} // namespace
+
+/** Actual implementation for TxRequestTracker's data structure. */
+class TxRequestTracker::Impl {
+ //! The current sequence number. Increases for every announcement. This is used to sort txhashes returned by
+ //! GetRequestable in announcement order.
+ SequenceNumber m_current_sequence{0};
+
+ //! This tracker's priority computer.
+ const PriorityComputer m_computer;
+
+ //! This tracker's main data structure. See SanityCheck() for the invariants that apply to it.
+ Index m_index;
+
+ //! Map with this tracker's per-peer statistics.
+ std::unordered_map<NodeId, PeerInfo> m_peerinfo;
+
+public:
+ void SanityCheck() const
+ {
+        // Recompute m_peerinfo from m_index. This verifies the data in it, as it should just be caching statistics
+        // on m_index. It also verifies the invariant that no PeerInfo entries with m_total==0 exist.
+ assert(m_peerinfo == RecomputePeerInfo(m_index));
+
+ // Calculate per-txhash statistics from m_index, and validate invariants.
+ for (auto& item : ComputeTxHashInfo(m_index, m_computer)) {
+ TxHashInfo& info = item.second;
+
+            // Cannot have only COMPLETED announcements (in that case the txhash should have been forgotten already).
+ assert(info.m_candidate_delayed + info.m_candidate_ready + info.m_candidate_best + info.m_requested > 0);
+
+            // Can have at most 1 CANDIDATE_BEST/REQUESTED announcement (combined).
+ assert(info.m_candidate_best + info.m_requested <= 1);
+
+ // If there are any CANDIDATE_READY announcements, there must be exactly one CANDIDATE_BEST or REQUESTED
+ // announcement.
+ if (info.m_candidate_ready > 0) {
+ assert(info.m_candidate_best + info.m_requested == 1);
+ }
+
+ // If there is both a CANDIDATE_READY and a CANDIDATE_BEST announcement, the CANDIDATE_BEST one must be
+ // at least as good (equal or higher priority) as the best CANDIDATE_READY.
+ if (info.m_candidate_ready && info.m_candidate_best) {
+ assert(info.m_priority_candidate_best >= info.m_priority_best_candidate_ready);
+ }
+
+ // No txhash can have been announced by the same peer twice.
+ std::sort(info.m_peers.begin(), info.m_peers.end());
+ assert(std::adjacent_find(info.m_peers.begin(), info.m_peers.end()) == info.m_peers.end());
+ }
+ }
+
+ void PostGetRequestableSanityCheck(std::chrono::microseconds now) const
+ {
+ for (const Announcement& ann : m_index) {
+ if (ann.IsWaiting()) {
+                // REQUESTED and CANDIDATE_DELAYED must have a time in the future (otherwise they should have been
+                // converted to COMPLETED/CANDIDATE_READY respectively).
+ assert(ann.m_time > now);
+ } else if (ann.IsSelectable()) {
+                // CANDIDATE_READY and CANDIDATE_BEST cannot have a time in the future (otherwise they should have
+                // remained CANDIDATE_DELAYED, or been converted back to it if time went backwards).
+ assert(ann.m_time <= now);
+ }
+ }
+ }
+
+private:
+ //! Wrapper around Index::...::erase that keeps m_peerinfo up to date.
+ template<typename Tag>
+ Iter<Tag> Erase(Iter<Tag> it)
+ {
+ auto peerit = m_peerinfo.find(it->m_peer);
+ peerit->second.m_completed -= it->m_state == State::COMPLETED;
+ peerit->second.m_requested -= it->m_state == State::REQUESTED;
+ if (--peerit->second.m_total == 0) m_peerinfo.erase(peerit);
+ return m_index.get<Tag>().erase(it);
+ }
+
+ //! Wrapper around Index::...::modify that keeps m_peerinfo up to date.
+ template<typename Tag, typename Modifier>
+ void Modify(Iter<Tag> it, Modifier modifier)
+ {
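+        // The modifier may change m_state, so first remove this announcement's contribution to the
+        // per-peer counters, apply the modification, and then add the contribution back.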
+ auto peerit = m_peerinfo.find(it->m_peer);
+ peerit->second.m_completed -= it->m_state == State::COMPLETED;
+ peerit->second.m_requested -= it->m_state == State::REQUESTED;
+ m_index.get<Tag>().modify(it, std::move(modifier));
+ peerit->second.m_completed += it->m_state == State::COMPLETED;
+ peerit->second.m_requested += it->m_state == State::REQUESTED;
+ }
+
+ //! Convert a CANDIDATE_DELAYED announcement into a CANDIDATE_READY. If this makes it the new best
+ //! CANDIDATE_READY (and no REQUESTED exists) and better than the CANDIDATE_BEST (if any), it becomes the new
+ //! CANDIDATE_BEST.
+ void PromoteCandidateReady(Iter<ByTxHash> it)
+ {
+ assert(it != m_index.get<ByTxHash>().end());
+ assert(it->m_state == State::CANDIDATE_DELAYED);
+ // Convert CANDIDATE_DELAYED to CANDIDATE_READY first.
+ Modify<ByTxHash>(it, [](Announcement& ann){ ann.m_state = State::CANDIDATE_READY; });
+        // The following code relies on the fact that the ByTxHash index is sorted by txhash, and then by state
+        // (first _DELAYED, then _READY, then _BEST/REQUESTED). Within the _READY announcements, the best one
+        // (highest priority) comes last. Thus, if an existing _BEST that this announcement may be preferred over
+        // exists for the same txhash, it must immediately follow the newly created _READY.
+ auto it_next = std::next(it);
+ if (it_next == m_index.get<ByTxHash>().end() || it_next->m_txhash != it->m_txhash ||
+ it_next->m_state == State::COMPLETED) {
+ // This is the new best CANDIDATE_READY, and there is no IsSelected() announcement for this txhash
+ // already.
+ Modify<ByTxHash>(it, [](Announcement& ann){ ann.m_state = State::CANDIDATE_BEST; });
+ } else if (it_next->m_state == State::CANDIDATE_BEST) {
+ Priority priority_old = m_computer(*it_next);
+ Priority priority_new = m_computer(*it);
+ if (priority_new > priority_old) {
+ // There is a CANDIDATE_BEST announcement already, but this one is better.
+ Modify<ByTxHash>(it_next, [](Announcement& ann){ ann.m_state = State::CANDIDATE_READY; });
+ Modify<ByTxHash>(it, [](Announcement& ann){ ann.m_state = State::CANDIDATE_BEST; });
+ }
+ }
+ }
+
+ //! Change the state of an announcement to something non-IsSelected(). If it was IsSelected(), the next best
+ //! announcement will be marked CANDIDATE_BEST.
+ void ChangeAndReselect(Iter<ByTxHash> it, State new_state)
+ {
+ assert(new_state == State::COMPLETED || new_state == State::CANDIDATE_DELAYED);
+ assert(it != m_index.get<ByTxHash>().end());
+ if (it->IsSelected() && it != m_index.get<ByTxHash>().begin()) {
+ auto it_prev = std::prev(it);
+ // The next best CANDIDATE_READY, if any, immediately precedes the REQUESTED or CANDIDATE_BEST
+ // announcement in the ByTxHash index.
+ if (it_prev->m_txhash == it->m_txhash && it_prev->m_state == State::CANDIDATE_READY) {
+ // If one such CANDIDATE_READY exists (for this txhash), convert it to CANDIDATE_BEST.
+ Modify<ByTxHash>(it_prev, [](Announcement& ann){ ann.m_state = State::CANDIDATE_BEST; });
+ }
+ }
+ Modify<ByTxHash>(it, [new_state](Announcement& ann){ ann.m_state = new_state; });
+ }
+
+ //! Check if 'it' is the only announcement for a given txhash that isn't COMPLETED.
+ bool IsOnlyNonCompleted(Iter<ByTxHash> it)
+ {
+ assert(it != m_index.get<ByTxHash>().end());
+ assert(it->m_state != State::COMPLETED); // Not allowed to call this on COMPLETED announcements.
+
+        // If this announcement has a predecessor that belongs to the same txhash, then (due to the ordering, and
+        // the fact that 'it' is not COMPLETED) that predecessor cannot be COMPLETED either, so 'it' is not alone.
+ if (it != m_index.get<ByTxHash>().begin() && std::prev(it)->m_txhash == it->m_txhash) return false;
+
+ // This announcement has a successor that belongs to the same txhash, and is not COMPLETED.
+ if (std::next(it) != m_index.get<ByTxHash>().end() && std::next(it)->m_txhash == it->m_txhash &&
+ std::next(it)->m_state != State::COMPLETED) return false;
+
+ return true;
+ }
+
+ /** Convert any announcement to a COMPLETED one. If there are no non-COMPLETED announcements left for this
+ * txhash, they are deleted. If this was a REQUESTED announcement, and there are other CANDIDATEs left, the
+ * best one is made CANDIDATE_BEST. Returns whether the announcement still exists. */
+ bool MakeCompleted(Iter<ByTxHash> it)
+ {
+ assert(it != m_index.get<ByTxHash>().end());
+
+ // Nothing to be done if it's already COMPLETED.
+ if (it->m_state == State::COMPLETED) return true;
+
+ if (IsOnlyNonCompleted(it)) {
+ // This is the last non-COMPLETED announcement for this txhash. Delete all.
+ uint256 txhash = it->m_txhash;
+ do {
+ it = Erase<ByTxHash>(it);
+ } while (it != m_index.get<ByTxHash>().end() && it->m_txhash == txhash);
+ return false;
+ }
+
+ // Mark the announcement COMPLETED, and select the next best announcement (the first CANDIDATE_READY) if
+ // needed.
+ ChangeAndReselect(it, State::COMPLETED);
+
+ return true;
+ }
+
+ //! Make the data structure consistent with a given point in time:
+    //! - REQUESTED announcements with expiry <= now are turned into COMPLETED.
+ //! - CANDIDATE_DELAYED announcements with reqtime <= now are turned into CANDIDATE_{READY,BEST}.
+ //! - CANDIDATE_{READY,BEST} announcements with reqtime > now are turned into CANDIDATE_DELAYED.
+ void SetTimePoint(std::chrono::microseconds now, std::vector<std::pair<NodeId, GenTxid>>* expired)
+ {
+ if (expired) expired->clear();
+
+ // Iterate over all CANDIDATE_DELAYED and REQUESTED from old to new, as long as they're in the past,
+ // and convert them to CANDIDATE_READY and COMPLETED respectively.
+ while (!m_index.empty()) {
+ auto it = m_index.get<ByTime>().begin();
+ if (it->m_state == State::CANDIDATE_DELAYED && it->m_time <= now) {
+ PromoteCandidateReady(m_index.project<ByTxHash>(it));
+ } else if (it->m_state == State::REQUESTED && it->m_time <= now) {
+ if (expired) expired->emplace_back(it->m_peer, ToGenTxid(*it));
+ MakeCompleted(m_index.project<ByTxHash>(it));
+ } else {
+ break;
+ }
+ }
+
+ while (!m_index.empty()) {
+ // If time went backwards, we may need to demote CANDIDATE_BEST and CANDIDATE_READY announcements back
+ // to CANDIDATE_DELAYED. This is an unusual edge case, and unlikely to matter in production. However,
+ // it makes it much easier to specify and test TxRequestTracker::Impl's behaviour.
+ auto it = std::prev(m_index.get<ByTime>().end());
+ if (it->IsSelectable() && it->m_time > now) {
+ ChangeAndReselect(m_index.project<ByTxHash>(it), State::CANDIDATE_DELAYED);
+ } else {
+ break;
+ }
+ }
+ }
+
+public:
+ Impl(bool deterministic) :
+ m_computer(deterministic),
+ // Explicitly initialize m_index as we need to pass a reference to m_computer to ByTxHashViewExtractor.
+ m_index(boost::make_tuple(
+ boost::make_tuple(ByPeerViewExtractor(), std::less<ByPeerView>()),
+ boost::make_tuple(ByTxHashViewExtractor(m_computer), std::less<ByTxHashView>()),
+ boost::make_tuple(ByTimeViewExtractor(), std::less<ByTimeView>())
+ )) {}
+
+    // Disable copying and assigning (a default copy won't work due to the stateful ByTxHashViewExtractor).
+ Impl(const Impl&) = delete;
+ Impl& operator=(const Impl&) = delete;
+
+ void DisconnectedPeer(NodeId peer)
+ {
+ auto& index = m_index.get<ByPeer>();
+ auto it = index.lower_bound(ByPeerView{peer, false, uint256::ZERO});
+ while (it != index.end() && it->m_peer == peer) {
+            // 'it' will be deleted in what follows, so we need to decide what to continue with afterwards. There
+            // are a number of cases to consider:
+ // - std::next(it) is end() or belongs to a different peer. In that case, this is the last iteration
+ // of the loop (denote this by setting it_next to end()).
+ // - 'it' is not the only non-COMPLETED announcement for its txhash. This means it will be deleted, but
+ // no other Announcement objects will be modified. Continue with std::next(it) if it belongs to the
+ // same peer, but decide this ahead of time (as 'it' may change position in what follows).
+ // - 'it' is the only non-COMPLETED announcement for its txhash. This means it will be deleted along
+ // with all other announcements for the same txhash - which may include std::next(it). However, other
+ // than 'it', no announcements for the same peer can be affected (due to (peer, txhash) uniqueness).
+ // In other words, the situation where std::next(it) is deleted can only occur if std::next(it)
+ // belongs to a different peer but the same txhash as 'it'. This is covered by the first bulletpoint
+ // already, and we'll have set it_next to end().
+ auto it_next = (std::next(it) == index.end() || std::next(it)->m_peer != peer) ? index.end() :
+ std::next(it);
+ // If the announcement isn't already COMPLETED, first make it COMPLETED (which will mark other
+ // CANDIDATEs as CANDIDATE_BEST, or delete all of a txhash's announcements if no non-COMPLETED ones are
+ // left).
+ if (MakeCompleted(m_index.project<ByTxHash>(it))) {
+ // Then actually delete the announcement (unless it was already deleted by MakeCompleted).
+ Erase<ByPeer>(it);
+ }
+ it = it_next;
+ }
+ }
+
+ void ForgetTxHash(const uint256& txhash)
+ {
+ auto it = m_index.get<ByTxHash>().lower_bound(ByTxHashView{txhash, State::CANDIDATE_DELAYED, 0});
+ while (it != m_index.get<ByTxHash>().end() && it->m_txhash == txhash) {
+ it = Erase<ByTxHash>(it);
+ }
+ }
+
+ void ReceivedInv(NodeId peer, const GenTxid& gtxid, bool preferred,
+ std::chrono::microseconds reqtime)
+ {
+ // Bail out if we already have a CANDIDATE_BEST announcement for this (txhash, peer) combination. The case
+ // where there is a non-CANDIDATE_BEST announcement already will be caught by the uniqueness property of the
+ // ByPeer index when we try to emplace the new object below.
+ if (m_index.get<ByPeer>().count(ByPeerView{peer, true, gtxid.GetHash()})) return;
+
+ // Try creating the announcement with CANDIDATE_DELAYED state (which will fail due to the uniqueness
+ // of the ByPeer index if a non-CANDIDATE_BEST announcement already exists with the same txhash and peer).
+ // Bail out in that case.
+ auto ret = m_index.get<ByPeer>().emplace(gtxid, peer, preferred, reqtime, m_current_sequence);
+ if (!ret.second) return;
+
+ // Update accounting metadata.
+ ++m_peerinfo[peer].m_total;
+ ++m_current_sequence;
+ }
+
+ //! Find the GenTxids to request now from peer.
+ std::vector<GenTxid> GetRequestable(NodeId peer, std::chrono::microseconds now,
+ std::vector<std::pair<NodeId, GenTxid>>* expired)
+ {
+ // Move time.
+ SetTimePoint(now, expired);
+
+ // Find all CANDIDATE_BEST announcements for this peer.
+ std::vector<const Announcement*> selected;
+ auto it_peer = m_index.get<ByPeer>().lower_bound(ByPeerView{peer, true, uint256::ZERO});
+ while (it_peer != m_index.get<ByPeer>().end() && it_peer->m_peer == peer &&
+ it_peer->m_state == State::CANDIDATE_BEST) {
+ selected.emplace_back(&*it_peer);
+ ++it_peer;
+ }
+
+ // Sort by sequence number.
+ std::sort(selected.begin(), selected.end(), [](const Announcement* a, const Announcement* b) {
+ return a->m_sequence < b->m_sequence;
+ });
+
+ // Convert to GenTxid and return.
+ std::vector<GenTxid> ret;
+ ret.reserve(selected.size());
+ std::transform(selected.begin(), selected.end(), std::back_inserter(ret), [](const Announcement* ann) {
+ return ToGenTxid(*ann);
+ });
+ return ret;
+ }
+
+ void RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds expiry)
+ {
+ auto it = m_index.get<ByPeer>().find(ByPeerView{peer, true, txhash});
+ if (it == m_index.get<ByPeer>().end()) {
+            // There is no CANDIDATE_BEST announcement, so look for a _READY or _DELAYED instead. If the caller
+            // only ever invokes RequestedTx with the values returned by GetRequestable, and calls no non-const
+            // functions other than ForgetTxHash and GetRequestable in between, this branch will never execute (as
+            // txhashes returned by GetRequestable always correspond to CANDIDATE_BEST announcements).
+
+ it = m_index.get<ByPeer>().find(ByPeerView{peer, false, txhash});
+ if (it == m_index.get<ByPeer>().end() || (it->m_state != State::CANDIDATE_DELAYED &&
+ it->m_state != State::CANDIDATE_READY)) {
+ // There is no CANDIDATE announcement tracked for this peer, so we have nothing to do. Either this
+ // txhash wasn't tracked at all (and the caller should have called ReceivedInv), or it was already
+ // requested and/or completed for other reasons and this is just a superfluous RequestedTx call.
+ return;
+ }
+
+ // Look for an existing CANDIDATE_BEST or REQUESTED with the same txhash. We only need to do this if the
+ // found announcement had a different state than CANDIDATE_BEST. If it did, invariants guarantee that no
+ // other CANDIDATE_BEST or REQUESTED can exist.
+ auto it_old = m_index.get<ByTxHash>().lower_bound(ByTxHashView{txhash, State::CANDIDATE_BEST, 0});
+ if (it_old != m_index.get<ByTxHash>().end() && it_old->m_txhash == txhash) {
+ if (it_old->m_state == State::CANDIDATE_BEST) {
+ // The data structure's invariants require that there can be at most one CANDIDATE_BEST or one
+ // REQUESTED announcement per txhash (but not both simultaneously), so we have to convert any
+ // existing CANDIDATE_BEST to another CANDIDATE_* when constructing another REQUESTED.
+ // It doesn't matter whether we pick CANDIDATE_READY or _DELAYED here, as SetTimePoint()
+ // will correct it at GetRequestable() time. If time only goes forward, it will always be
+ // _READY, so pick that to avoid extra work in SetTimePoint().
+ Modify<ByTxHash>(it_old, [](Announcement& ann) { ann.m_state = State::CANDIDATE_READY; });
+ } else if (it_old->m_state == State::REQUESTED) {
+ // As we're no longer waiting for a response to the previous REQUESTED announcement, convert it
+                    // to COMPLETED. This also helps guarantee progress.
+ Modify<ByTxHash>(it_old, [](Announcement& ann) { ann.m_state = State::COMPLETED; });
+ }
+ }
+ }
+
+ Modify<ByPeer>(it, [expiry](Announcement& ann) {
+ ann.m_state = State::REQUESTED;
+ ann.m_time = expiry;
+ });
+ }
+
+ void ReceivedResponse(NodeId peer, const uint256& txhash)
+ {
+ // We need to search the ByPeer index for both (peer, false, txhash) and (peer, true, txhash).
+ auto it = m_index.get<ByPeer>().find(ByPeerView{peer, false, txhash});
+ if (it == m_index.get<ByPeer>().end()) {
+ it = m_index.get<ByPeer>().find(ByPeerView{peer, true, txhash});
+ }
+ if (it != m_index.get<ByPeer>().end()) MakeCompleted(m_index.project<ByTxHash>(it));
+ }
+
+ size_t CountInFlight(NodeId peer) const
+ {
+ auto it = m_peerinfo.find(peer);
+ if (it != m_peerinfo.end()) return it->second.m_requested;
+ return 0;
+ }
+
+ size_t CountCandidates(NodeId peer) const
+ {
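+        // Every announcement is in exactly one of the CANDIDATE_*, REQUESTED or COMPLETED states,
+        // so the candidate count is what remains after subtracting the latter two from the total.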
+ auto it = m_peerinfo.find(peer);
+ if (it != m_peerinfo.end()) return it->second.m_total - it->second.m_requested - it->second.m_completed;
+ return 0;
+ }
+
+ size_t Count(NodeId peer) const
+ {
+ auto it = m_peerinfo.find(peer);
+ if (it != m_peerinfo.end()) return it->second.m_total;
+ return 0;
+ }
+
+ //! Count how many announcements are being tracked in total across all peers and transactions.
+ size_t Size() const { return m_index.size(); }
+
+ uint64_t ComputePriority(const uint256& txhash, NodeId peer, bool preferred) const
+ {
+        // Return the Priority as a uint64_t, as the Priority type is internal.
+ return uint64_t{m_computer(txhash, peer, preferred)};
+ }
+
+};
+
+TxRequestTracker::TxRequestTracker(bool deterministic) :
+ m_impl{MakeUnique<TxRequestTracker::Impl>(deterministic)} {}
+
+TxRequestTracker::~TxRequestTracker() = default;
+
+void TxRequestTracker::ForgetTxHash(const uint256& txhash) { m_impl->ForgetTxHash(txhash); }
+void TxRequestTracker::DisconnectedPeer(NodeId peer) { m_impl->DisconnectedPeer(peer); }
+size_t TxRequestTracker::CountInFlight(NodeId peer) const { return m_impl->CountInFlight(peer); }
+size_t TxRequestTracker::CountCandidates(NodeId peer) const { return m_impl->CountCandidates(peer); }
+size_t TxRequestTracker::Count(NodeId peer) const { return m_impl->Count(peer); }
+size_t TxRequestTracker::Size() const { return m_impl->Size(); }
+void TxRequestTracker::SanityCheck() const { m_impl->SanityCheck(); }
+
+void TxRequestTracker::PostGetRequestableSanityCheck(std::chrono::microseconds now) const
+{
+ m_impl->PostGetRequestableSanityCheck(now);
+}
+
+void TxRequestTracker::ReceivedInv(NodeId peer, const GenTxid& gtxid, bool preferred,
+ std::chrono::microseconds reqtime)
+{
+ m_impl->ReceivedInv(peer, gtxid, preferred, reqtime);
+}
+
+void TxRequestTracker::RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds expiry)
+{
+ m_impl->RequestedTx(peer, txhash, expiry);
+}
+
+void TxRequestTracker::ReceivedResponse(NodeId peer, const uint256& txhash)
+{
+ m_impl->ReceivedResponse(peer, txhash);
+}
+
+std::vector<GenTxid> TxRequestTracker::GetRequestable(NodeId peer, std::chrono::microseconds now,
+ std::vector<std::pair<NodeId, GenTxid>>* expired)
+{
+ return m_impl->GetRequestable(peer, now, expired);
+}
+
+uint64_t TxRequestTracker::ComputePriority(const uint256& txhash, NodeId peer, bool preferred) const
+{
+ return m_impl->ComputePriority(txhash, peer, preferred);
+}
diff --git a/src/txrequest.h b/src/txrequest.h
new file mode 100644
index 0000000000..cd3042c87e
--- /dev/null
+++ b/src/txrequest.h
@@ -0,0 +1,211 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_TXREQUEST_H
+#define BITCOIN_TXREQUEST_H
+
+#include <primitives/transaction.h>
+#include <net.h> // For NodeId
+#include <uint256.h>
+
+#include <chrono>
+#include <vector>
+
+#include <stdint.h>
+
+/** Data structure to keep track of, and schedule, transaction downloads from peers.
+ *
+ * === Specification ===
+ *
+ * We keep track of which peers have announced which transactions, and use that to determine which requests
+ * should go to which peer, when, and in what order.
+ *
+ * The following information is tracked per peer/tx combination ("announcement"):
+ * - Which peer announced it (through their NodeId)
+ * - The txid or wtxid of the transaction (collectively called "txhash" in what follows)
+ * - Whether it was a tx or wtx announcement (see BIP339).
+ *  - The earliest permitted time at which the transaction can be requested from that peer (called "reqtime").
+ * - Whether it's from a "preferred" peer or not. Which announcements get this flag is determined by the caller, but
+ *    this is designed for outbound peers, or other peers that we have a higher level of trust in. Even if a
+ *    peer's preferredness changes, the preferred flag of its existing announcements won't change.
+ * - Whether or not the transaction was requested already, and if so, when it times out (called "expiry").
+ * - Whether or not the transaction request failed already (timed out, or invalid transaction or NOTFOUND was
+ * received).
+ *
+ * Transaction requests are then assigned to peers, following these rules:
+ *
+ * - No transaction is requested as long as another request for the same txhash is outstanding (it needs to fail
+ * first by passing expiry, or a NOTFOUND or invalid transaction has to be received for it).
+ *
+ * Rationale: to avoid wasting bandwidth on multiple copies of the same transaction. Note that this only works
+ * per txhash, so if the same transaction is announced both through txid and wtxid, we have no means
+ * to prevent fetching both (the caller can however mitigate this by delaying one, see further).
+ *
+ * - The same transaction is never requested twice from the same peer, unless the announcement was forgotten in
+ * between, and re-announced. Announcements are forgotten only:
+ * - If a peer goes offline, all its announcements are forgotten.
+ * - If a transaction has been successfully received, or is otherwise no longer needed, the caller can call
+ * ForgetTxHash, which removes all announcements across all peers with the specified txhash.
+ * - If for a given txhash only already-failed announcements remain, they are all forgotten.
+ *
+ * Rationale: giving a peer multiple chances to announce a transaction would allow them to bias requests in their
+ * favor, worsening transaction censoring attacks. The flip side is that as long as an attacker manages
+ * to prevent us from receiving a transaction, failed announcements (including those from honest peers)
+ * will linger longer, increasing memory usage somewhat. The impact of this is limited by imposing a
+ * cap on the number of tracked announcements per peer. As failed requests in response to announcements
+ * from honest peers should be rare, this almost solely hinders attackers.
+ * Transaction censoring attacks can be done by announcing transactions quickly while not answering
+ * requests for them. See https://allquantor.at/blockchainbib/pdf/miller2015topology.pdf for more
+ * information.
+ *
+ * - Transactions are not requested from a peer until its reqtime has passed.
+ *
+ * Rationale: enable the calling code to define a delay for less-than-ideal peers, so that (presumed) better
+ * peers have a chance to give their announcement first.
+ *
+ * - If multiple viable candidate peers exist according to the above rules, pick a peer as follows:
+ *
+ * - If any preferred peers are available, non-preferred peers are not considered for what follows.
+ *
+ * Rationale: preferred peers are more trusted by us, so are less likely to be under attacker control.
+ *
+ * - Pick a uniformly random peer among the candidates.
+ *
+ * Rationale: random assignments are hard to influence for attackers.
+ *
+ * Together these rules strike a balance between being fast in non-adversarial conditions and minimizing
+ * susceptibility to censorship attacks. An attacker that races the network:
+ * - Will be unsuccessful if all preferred connections are honest (and there is at least one preferred connection).
+ * - If there are P preferred connections of which Ph>=1 are honest, the attacker can delay us from learning
+ * about a transaction by k expiration periods, where k ~ 1 + NHG(N=P-1,K=P-Ph-1,r=1), which has mean
+ * P/(Ph+1) (where NHG stands for Negative Hypergeometric distribution). The "1 +" is due to the fact that the
+ * attacker can be the first to announce through a preferred connection in this scenario, which very likely means
+ * they get the first request.
+ * - If all P preferred connections are to the attacker, and there are NP non-preferred connections of which NPh>=1
+ * are honest, where we assume that the attacker can disconnect and reconnect those connections, the distribution
+ * becomes k ~ P + NB(p=1-NPh/NP,r=1) (where NB stands for Negative Binomial distribution), which has mean
+ * P-1+NP/NPh.
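+ *
+ *   As a worked example (with illustrative numbers, not from the design itself): with P=4 preferred
+ *   connections of which Ph=1 is honest, the mean delay is P/(Ph+1) = 2 expiration periods.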
+ *
+ * Complexity:
+ * - Memory usage is proportional to the total number of tracked announcements (Size()) plus the number of
+ * peers with a nonzero number of tracked announcements.
+ * - CPU usage is generally logarithmic in the total number of tracked announcements, plus the number of
+ * announcements affected by an operation (amortized O(1) per announcement).
+ */
+class TxRequestTracker {
+ // Avoid littering this header file with implementation details.
+ class Impl;
+ const std::unique_ptr<Impl> m_impl;
+
+public:
+ //! Construct a TxRequestTracker.
+ explicit TxRequestTracker(bool deterministic = false);
+ ~TxRequestTracker();
+
+ // Conceptually, the data structure consists of a collection of "announcements", one for each peer/txhash
+ // combination:
+ //
+ // - CANDIDATE announcements represent transactions that were announced by a peer, and that become available for
+ // download after their reqtime has passed.
+ //
+ // - REQUESTED announcements represent transactions that have been requested, and which we're awaiting a
+ // response for from that peer. Their expiry value determines when the request times out.
+ //
+ // - COMPLETED announcements represent transactions that have been requested from a peer, and a NOTFOUND or a
+ // transaction was received in response (valid or not), or they timed out. They're only kept around to
+ // prevent requesting them again. If only COMPLETED announcements for a given txhash remain (so no CANDIDATE
+ // or REQUESTED ones), all of them are deleted (this is an invariant, and maintained by all operations below).
+ //
+ // The operations below manipulate the data structure.
+
+ /** Adds a new CANDIDATE announcement.
+ *
+ * Does nothing if one already exists for that (txhash, peer) combination (whether it's CANDIDATE, REQUESTED, or
+ * COMPLETED). Note that the txid/wtxid property is ignored for determining uniqueness, so if an announcement
+ * is added for a wtxid H, while one for txid H from the same peer already exists, it will be ignored. This is
+ * harmless as the txhashes being equal implies it is a non-segwit transaction, so it doesn't matter how it is
+ * fetched. The new announcement is given the specified preferred and reqtime values, and takes its is_wtxid
+ * from the specified gtxid.
+ */
+ void ReceivedInv(NodeId peer, const GenTxid& gtxid, bool preferred,
+ std::chrono::microseconds reqtime);
+
+ /** Deletes all announcements for a given peer.
+ *
+ * It should be called when a peer goes offline.
+ */
+ void DisconnectedPeer(NodeId peer);
+
+ /** Deletes all announcements for a given txhash (both txid and wtxid ones).
+ *
+ * This should be called when a transaction is no longer needed. The caller should ensure that new announcements
+ * for the same txhash will not trigger new ReceivedInv calls, at least in the short term after this call.
+ */
+ void ForgetTxHash(const uint256& txhash);
+
+    /** Find the GenTxids to request now from peer.
+ *
+ * It does the following:
+ * - Convert all REQUESTED announcements (for all txhashes/peers) with (expiry <= now) to COMPLETED ones.
+ * These are returned in expired, if non-nullptr.
+ * - Requestable announcements are selected: CANDIDATE announcements from the specified peer with
+ * (reqtime <= now) for which no existing REQUESTED announcement with the same txhash from a different peer
+ * exists, and for which the specified peer is the best choice among all (reqtime <= now) CANDIDATE
+ * announcements with the same txhash (subject to preferredness rules, and tiebreaking using a deterministic
+ * salted hash of peer and txhash).
+ * - The selected announcements are converted to GenTxids using their is_wtxid flag, and returned in
+ * announcement order (even if multiple were added at the same time, or when the clock went backwards while
+ * they were being added). This is done to minimize disruption from dependent transactions being requested
+ * out of order: if multiple dependent transactions are announced simultaneously by one peer, and end up
+ * being requested from them, the requests will happen in announcement order.
+ */
+ std::vector<GenTxid> GetRequestable(NodeId peer, std::chrono::microseconds now,
+ std::vector<std::pair<NodeId, GenTxid>>* expired = nullptr);
+
+ /** Marks a transaction as requested, with a specified expiry.
+ *
+ * If no CANDIDATE announcement for the provided peer and txhash exists, this call has no effect. Otherwise:
+ * - That announcement is converted to REQUESTED.
+ * - If any other REQUESTED announcement for the same txhash already existed, it means an unexpected request
+ * was made (GetRequestable will never advise doing so). In this case it is converted to COMPLETED, as we're
+ * no longer waiting for a response to it.
+ */
+ void RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds expiry);
+
+ /** Converts a CANDIDATE or REQUESTED announcement to a COMPLETED one. If no such announcement exists for the
+ * provided peer and txhash, nothing happens.
+ *
+ * It should be called whenever a transaction or NOTFOUND was received from a peer. When the transaction is
+     * no longer needed at all, ForgetTxHash should be called instead of, or in addition to, this call.
+ */
+ void ReceivedResponse(NodeId peer, const uint256& txhash);
+
+ // The operations below inspect the data structure.
+
+ /** Count how many REQUESTED announcements a peer has. */
+ size_t CountInFlight(NodeId peer) const;
+
+ /** Count how many CANDIDATE announcements a peer has. */
+ size_t CountCandidates(NodeId peer) const;
+
+ /** Count how many announcements a peer has (REQUESTED, CANDIDATE, and COMPLETED combined). */
+ size_t Count(NodeId peer) const;
+
+ /** Count how many announcements are being tracked in total across all peers and transaction hashes. */
+ size_t Size() const;
+
+ /** Access to the internal priority computation (testing only) */
+ uint64_t ComputePriority(const uint256& txhash, NodeId peer, bool preferred) const;
+
+ /** Run internal consistency check (testing only). */
+ void SanityCheck() const;
+
+ /** Run a time-dependent internal consistency check (testing only).
+ *
+ * This can only be called immediately after GetRequestable, with the same 'now' parameter.
+ */
+ void PostGetRequestableSanityCheck(std::chrono::microseconds now) const;
+};
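+
+// A minimal sketch of the intended caller flow (illustration only; 'peer', 'gtxid', 'txhash',
+// 'reqtime', 'now' and 'expiry' stand for caller-supplied values):
+//
+//     TxRequestTracker tracker;
+//     tracker.ReceivedInv(peer, gtxid, /*preferred=*/true, reqtime);
+//     for (const GenTxid& req : tracker.GetRequestable(peer, now)) {
+//         tracker.RequestedTx(peer, req.GetHash(), expiry);
+//     }
+//     tracker.ReceivedResponse(peer, txhash); // on receiving the tx or a NOTFOUND
+//     tracker.ForgetTxHash(txhash);           // once the tx is no longer needed at all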
+
+#endif // BITCOIN_TXREQUEST_H
diff --git a/src/uint256.cpp b/src/uint256.cpp
index d074df2f20..f358b62903 100644
--- a/src/uint256.cpp
+++ b/src/uint256.cpp
@@ -80,4 +80,5 @@ template std::string base_blob<256>::ToString() const;
template void base_blob<256>::SetHex(const char*);
template void base_blob<256>::SetHex(const std::string&);
+const uint256 uint256::ZERO(0);
const uint256 uint256::ONE(1);
diff --git a/src/uint256.h b/src/uint256.h
index c55cb31456..ceae70707e 100644
--- a/src/uint256.h
+++ b/src/uint256.h
@@ -126,6 +126,7 @@ public:
constexpr uint256() {}
constexpr explicit uint256(uint8_t v) : base_blob<256>(v) {}
explicit uint256(const std::vector<unsigned char>& vch) : base_blob<256>(vch) {}
+ static const uint256 ZERO;
static const uint256 ONE;
};
diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py
index 5c3f021b3f..16d9302db8 100755
--- a/test/functional/p2p_tx_download.py
+++ b/test/functional/p2p_tx_download.py
@@ -42,15 +42,15 @@ class TestP2PConn(P2PInterface):
# Constants from net_processing
GETDATA_TX_INTERVAL = 60 # seconds
-MAX_GETDATA_RANDOM_DELAY = 2 # seconds
INBOUND_PEER_TX_DELAY = 2 # seconds
TXID_RELAY_DELAY = 2 # seconds
+OVERLOADED_PEER_DELAY = 2 # seconds
MAX_GETDATA_IN_FLIGHT = 100
-TX_EXPIRY_INTERVAL = GETDATA_TX_INTERVAL * 10
+MAX_PEER_TX_ANNOUNCEMENTS = 5000
# Python test constants
NUM_INBOUND = 10
-MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + MAX_GETDATA_RANDOM_DELAY + INBOUND_PEER_TX_DELAY + TXID_RELAY_DELAY
+MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + INBOUND_PEER_TX_DELAY + TXID_RELAY_DELAY
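+# With the constants above this is 60 + 2 + 2 = 64 seconds.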
class TxDownloadTest(BitcoinTestFramework):
@@ -121,14 +121,12 @@ class TxDownloadTest(BitcoinTestFramework):
# * the first time it is re-requested from the outbound peer, plus
# * 2 seconds to avoid races
assert self.nodes[1].getpeerinfo()[0]['inbound'] == False
- timeout = 2 + (MAX_GETDATA_RANDOM_DELAY + INBOUND_PEER_TX_DELAY) + (
- GETDATA_TX_INTERVAL + MAX_GETDATA_RANDOM_DELAY)
+ timeout = 2 + INBOUND_PEER_TX_DELAY + GETDATA_TX_INTERVAL
self.log.info("Tx should be received at node 1 after {} seconds".format(timeout))
self.sync_mempools(timeout=timeout)
def test_in_flight_max(self):
- self.log.info("Test that we don't request more than {} transactions from any peer, every {} minutes".format(
- MAX_GETDATA_IN_FLIGHT, TX_EXPIRY_INTERVAL / 60))
+ self.log.info("Test that we don't load peers with more than {} transaction requests immediately".format(MAX_GETDATA_IN_FLIGHT))
txids = [i for i in range(MAX_GETDATA_IN_FLIGHT + 2)]
p = self.nodes[0].p2ps[0]
@@ -136,31 +134,120 @@ class TxDownloadTest(BitcoinTestFramework):
with p2p_lock:
p.tx_getdata_count = 0
- p.send_message(msg_inv([CInv(t=MSG_WTX, h=i) for i in txids]))
+ mock_time = int(time.time() + 1)
+ self.nodes[0].setmocktime(mock_time)
+ for i in range(MAX_GETDATA_IN_FLIGHT):
+ p.send_message(msg_inv([CInv(t=MSG_WTX, h=txids[i])]))
+ p.sync_with_ping()
+ mock_time += INBOUND_PEER_TX_DELAY
+ self.nodes[0].setmocktime(mock_time)
p.wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT)
+ for i in range(MAX_GETDATA_IN_FLIGHT, len(txids)):
+ p.send_message(msg_inv([CInv(t=MSG_WTX, h=txids[i])]))
+ p.sync_with_ping()
+ self.log.info("No more than {} requests should be seen within {} seconds after announcement".format(MAX_GETDATA_IN_FLIGHT, INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY - 1))
+ self.nodes[0].setmocktime(mock_time + INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY - 1)
+ p.sync_with_ping()
with p2p_lock:
assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT)
+ self.log.info("If we wait {} seconds after announcement, we should eventually get more requests".format(INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY))
+ self.nodes[0].setmocktime(mock_time + INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY)
+ p.wait_until(lambda: p.tx_getdata_count == len(txids))
- self.log.info("Now check that if we send a NOTFOUND for a transaction, we'll get one more request")
- p.send_message(msg_notfound(vec=[CInv(t=MSG_WTX, h=txids[0])]))
- p.wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT + 1, timeout=10)
+ def test_expiry_fallback(self):
+ self.log.info('Check that expiry will select another peer for download')
+ WTXID = 0xffaa
+ peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
+ peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
+ for p in [peer1, peer2]:
+ p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)]))
+ # One of the peers is asked for the tx
+ peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
with p2p_lock:
- assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT + 1)
+ peer_expiry, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
+ assert_equal(peer_fallback.tx_getdata_count, 0)
+ self.nodes[0].setmocktime(int(time.time()) + GETDATA_TX_INTERVAL + 1) # Wait for request to peer_expiry to expire
+ peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
+ with p2p_lock:
+ assert_equal(peer_fallback.tx_getdata_count, 1)
+ self.restart_node(0) # reset mocktime
- WAIT_TIME = TX_EXPIRY_INTERVAL // 2 + TX_EXPIRY_INTERVAL
- self.log.info("if we wait about {} minutes, we should eventually get more requests".format(WAIT_TIME / 60))
- self.nodes[0].setmocktime(int(time.time() + WAIT_TIME))
- p.wait_until(lambda: p.tx_getdata_count == MAX_GETDATA_IN_FLIGHT + 2)
- self.nodes[0].setmocktime(0)
+ def test_disconnect_fallback(self):
+ self.log.info('Check that disconnect will select another peer for download')
+ WTXID = 0xffbb
+ peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
+ peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
+ for p in [peer1, peer2]:
+ p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)]))
+ # One of the peers is asked for the tx
+ peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
+ with p2p_lock:
+ peer_disconnect, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
+ assert_equal(peer_fallback.tx_getdata_count, 0)
+ peer_disconnect.peer_disconnect()
+ peer_disconnect.wait_for_disconnect()
+ peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
+ with p2p_lock:
+ assert_equal(peer_fallback.tx_getdata_count, 1)
+
+ def test_notfound_fallback(self):
+ self.log.info('Check that notfounds will select another peer for download immediately')
+ WTXID = 0xffdd
+ peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
+ peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
+ for p in [peer1, peer2]:
+ p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)]))
+ # One of the peers is asked for the tx
+ peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
+ with p2p_lock:
+ peer_notfound, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
+ assert_equal(peer_fallback.tx_getdata_count, 0)
+ peer_notfound.send_and_ping(msg_notfound(vec=[CInv(MSG_WTX, WTXID)])) # Send notfound, so that fallback peer is selected
+ peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
+ with p2p_lock:
+ assert_equal(peer_fallback.tx_getdata_count, 1)
+
+ def test_preferred_inv(self):
+ self.log.info('Check that invs from preferred peers are downloaded immediately')
+ self.restart_node(0, extra_args=['-whitelist=noban@127.0.0.1'])
+ peer = self.nodes[0].add_p2p_connection(TestP2PConn())
+ peer.send_message(msg_inv([CInv(t=MSG_WTX, h=0xff00ff00)]))
+ peer.wait_until(lambda: peer.tx_getdata_count >= 1, timeout=1)
+ with p2p_lock:
+ assert_equal(peer.tx_getdata_count, 1)
+
+ def test_large_inv_batch(self):
+ self.log.info('Test how large inv batches are handled with relay permission')
+ self.restart_node(0, extra_args=['-whitelist=relay@127.0.0.1'])
+ peer = self.nodes[0].add_p2p_connection(TestP2PConn())
+ peer.send_message(msg_inv([CInv(t=MSG_WTX, h=wtxid) for wtxid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)]))
+ peer.wait_until(lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS + 1)
+
+ self.log.info('Test how large inv batches are handled without relay permission')
+ self.restart_node(0)
+ peer = self.nodes[0].add_p2p_connection(TestP2PConn())
+ peer.send_message(msg_inv([CInv(t=MSG_WTX, h=wtxid) for wtxid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)]))
+ peer.wait_until(lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS)
+ peer.sync_with_ping()
+ with p2p_lock:
+ assert_equal(peer.tx_getdata_count, MAX_PEER_TX_ANNOUNCEMENTS)
def test_spurious_notfound(self):
self.log.info('Check that spurious notfound is ignored')
self.nodes[0].p2ps[0].send_message(msg_notfound(vec=[CInv(MSG_TX, 1)]))
def run_test(self):
+ # Run tests without mocktime that only need one peer-connection first, to avoid restarting the nodes
+ self.test_expiry_fallback()
+ self.test_disconnect_fallback()
+ self.test_notfound_fallback()
+ self.test_preferred_inv()
+ self.test_large_inv_batch()
+ self.test_spurious_notfound()
+
# Run each test against new bitcoind instances, as setting mocktimes has long-term effects on when
# the next trickle relay event happens.
- for test in [self.test_spurious_notfound, self.test_in_flight_max, self.test_inv_block, self.test_tx_requests]:
+ for test in [self.test_in_flight_max, self.test_inv_block, self.test_tx_requests]:
self.stop_nodes()
self.start_nodes()
self.connect_nodes(1, 0)