Diffstat (limited to 'src/util/hasher.h')
-rw-r--r--  src/util/hasher.h  103
1 file changed, 103 insertions, 0 deletions
diff --git a/src/util/hasher.h b/src/util/hasher.h
new file mode 100644
index 0000000000..fa2fea30d8
--- /dev/null
+++ b/src/util/hasher.h
@@ -0,0 +1,103 @@
+// Copyright (c) 2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_UTIL_HASHER_H
+#define BITCOIN_UTIL_HASHER_H
+
+#include <crypto/common.h>
+#include <crypto/siphash.h>
+#include <primitives/transaction.h>
+#include <span.h>
+#include <uint256.h>
+
+#include <cstring>
+
+class SaltedTxidHasher
+{
+private:
+    /** Salt */
+    const uint64_t k0, k1;
+
+public:
+    SaltedTxidHasher();
+
+    size_t operator()(const uint256& txid) const {
+        return SipHashUint256(k0, k1, txid);
+    }
+};
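
The salt keeps the hash unpredictable to attackers, so crafted txids cannot force worst-case collisions in unordered containers. The constructor is defined out of line (in src/util/hasher.cpp); a minimal sketch of how the salts might be seeded, assuming GetRand() from random.h:

    #include <limits>
    #include <random.h>

    // Seed each half of the SipHash key with fresh randomness per hasher
    // instance, so the mapping differs between runs and between maps.
    SaltedTxidHasher::SaltedTxidHasher() :
        k0(GetRand(std::numeric_limits<uint64_t>::max())),
        k1(GetRand(std::numeric_limits<uint64_t>::max())) {}
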
+
+class SaltedOutpointHasher
+{
+private:
+    /** Salt */
+    const uint64_t k0, k1;
+
+public:
+    SaltedOutpointHasher();
+
+    /**
+     * This *must* return size_t. With Boost 1.46 on 32-bit systems the
+     * unordered_map will behave unpredictably if the custom hasher returns a
+     * uint64_t, resulting in failures when syncing the chain (#4634).
+     *
+     * Making the hash noexcept allows libstdc++'s unordered_map to recalculate
+     * the hash during rehash, so it does not have to cache the value. This
+     * reduces each map node's memory use by sizeof(size_t). The required
+     * recalculation has a slight performance penalty (around 1.6%), but it is
+     * compensated by memory savings of about 9%, which allow a larger dbcache.
+     *
+     * @see https://gcc.gnu.org/onlinedocs/gcc-9.2.0/libstdc++/manual/manual/unordered_associative.html
+     */
+    size_t operator()(const COutPoint& id) const noexcept {
+        return SipHashUint256Extra(k0, k1, id.hash, id.n);
+    }
+};
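
For context, this hasher keys the in-memory UTXO set. A sketch of the kind of map declaration that consumes it (the real typedef lives in coins.h; CCoinsCacheEntry is a stand-in here):

    #include <unordered_map>

    struct CCoinsCacheEntry; // stand-in for the real entry type in coins.h

    // The noexcept hasher above lets this map skip caching hash values.
    using CCoinsMap = std::unordered_map<COutPoint, CCoinsCacheEntry, SaltedOutpointHasher>;
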
+
+struct FilterHeaderHasher
+{
+    size_t operator()(const uint256& hash) const { return ReadLE64(hash.begin()); }
+};
+
+/**
+ * We're hashing a nonce into the entries themselves, so we don't need extra
+ * blinding in the set hash computation.
+ *
+ * This may exhibit platform-dependent endianness behavior, but because these
+ * are nonced hashes (random) and this state is only ever used locally, it is
+ * safe. All that matters is local consistency.
+ */
+class SignatureCacheHasher
+{
+public:
+    template <uint8_t hash_select>
+    uint32_t operator()(const uint256& key) const
+    {
+        static_assert(hash_select < 8, "SignatureCacheHasher only has 8 hashes available.");
+        uint32_t u;
+        std::memcpy(&u, key.begin() + 4 * hash_select, 4);
+        return u;
+    }
+};
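
Each hash_select value reads a disjoint 4-byte window of the 32-byte key, so one already-random key yields eight cheap, independent hash functions, which is what a cuckoo cache needs. A hypothetical illustration, not from the tree:

    uint32_t ExampleWords(const uint256& key)
    {
        SignatureCacheHasher hasher;
        const uint32_t h0 = hasher.operator()<0>(key); // bytes 0..3 of the key
        const uint32_t h7 = hasher.operator()<7>(key); // bytes 28..31 of the key
        return h0 ^ h7; // combine just to use both values
    }
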
+
+struct BlockHasher
+{
+    // This used to call `GetCheapHash()` in uint256, which was later moved;
+    // the cheap hash function simply calls ReadLE64(), so the end result is
+    // identical.
+    size_t operator()(const uint256& hash) const { return ReadLE64(hash.begin()); }
+};
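
Block hashes are proof-of-work outputs and thus already uniformly distributed, so no salt is needed here. A sketch of the typical consumer, assuming a forward-declared CBlockIndex:

    #include <unordered_map>

    class CBlockIndex;

    // Map from block hash to block index entry, keyed with the cheap hasher.
    using BlockMap = std::unordered_map<uint256, CBlockIndex*, BlockHasher>;
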
+
+class SaltedSipHasher
+{
+private:
+    /** Salt */
+    const uint64_t m_k0, m_k1;
+
+public:
+    SaltedSipHasher();
+
+    size_t operator()(const Span<const unsigned char>& script) const;
+};
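
Unlike the hashers above, operator() is only declared here; its definition lives in src/util/hasher.cpp. A plausible sketch, assuming CSipHasher from crypto/siphash.h:

    // Feed the whole byte span through salted SipHash and narrow to size_t.
    size_t SaltedSipHasher::operator()(const Span<const unsigned char>& script) const
    {
        return static_cast<size_t>(CSipHasher(m_k0, m_k1)
            .Write(script.data(), script.size())
            .Finalize());
    }
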
+
+#endif // BITCOIN_UTIL_HASHER_H