path: root/src/util/hasher.h
author     Andrew Chow <achow101-github@achow101.com>    2019-09-17 17:02:56 -0400
committer  Andrew Chow <achow101-github@achow101.com>    2020-11-10 14:33:37 -0500
commit     95e61c1cf2a91d041c8025306ba36f0ea2806894 (patch)
tree       225cae6060841c5049f9722b48776dd8901d4f59 /src/util/hasher.h
parent     42f950cb27b732782d55282cdcd934396fcd7071 (diff)
download   bitcoin-95e61c1cf2a91d041c8025306ba36f0ea2806894.tar.xz
Move Hashers to util/hasher.{cpp/h}
Move the hashers that we use for hash tables to a common place.

Moved hashers:
- SaltedTxidHasher
- SaltedOutpointHasher
- FilterHeaderHasher
- SignatureCacheHasher
- BlockHasher
Diffstat (limited to 'src/util/hasher.h')
-rw-r--r--  src/util/hasher.h  87
1 file changed, 87 insertions, 0 deletions
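The classes added in the diff below are plain hash functors meant to be passed as the Hash template parameter of the standard unordered containers. As a hedged illustration (not part of this commit), this is roughly how one of the moved hashers is wired into std::unordered_map inside a Bitcoin Core build tree; the value type int, the alias OutpointMap, and the variable name spent_heights are placeholders invented for the example, and the program assumes it is compiled and linked together with the accompanying util/hasher.cpp so the salted constructor is defined:

// Sketch only: demonstrates the intended use of SaltedOutpointHasher as a
// container hash functor; paths assume the Bitcoin Core source tree.
#include <util/hasher.h>            // SaltedOutpointHasher (the file added below)
#include <primitives/transaction.h> // COutPoint, uint256
#include <unordered_map>

// The hasher is supplied as the Hash template parameter, so each map instance
// hashes outpoints with its own per-process SipHash salt (k0, k1).
using OutpointMap = std::unordered_map<COutPoint, int, SaltedOutpointHasher>;

int main()
{
    OutpointMap spent_heights;                       // hypothetical map for the example
    spent_heights[COutPoint(uint256(), 0)] = 42;     // SaltedOutpointHasher::operator() runs here
    return spent_heights.count(COutPoint(uint256(), 0)) == 1 ? 0 : 1;
}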
diff --git a/src/util/hasher.h b/src/util/hasher.h
new file mode 100644
index 0000000000..77301df63d
--- /dev/null
+++ b/src/util/hasher.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_UTIL_HASHER_H
+#define BITCOIN_UTIL_HASHER_H
+
+#include <crypto/siphash.h>
+#include <primitives/transaction.h>
+#include <uint256.h>
+
+class SaltedTxidHasher
+{
+private:
+ /** Salt */
+ const uint64_t k0, k1;
+
+public:
+ SaltedTxidHasher();
+
+ size_t operator()(const uint256& txid) const {
+ return SipHashUint256(k0, k1, txid);
+ }
+};
+
+class SaltedOutpointHasher
+{
+private:
+ /** Salt */
+ const uint64_t k0, k1;
+
+public:
+ SaltedOutpointHasher();
+
+ /**
+ * This *must* return size_t. With Boost 1.46 on 32-bit systems the
+ * unordered_map will behave unpredictably if the custom hasher returns a
+ * uint64_t, resulting in failures when syncing the chain (#4634).
+ *
+ * Having the hash noexcept allows libstdc++'s unordered_map to recalculate
+ * the hash during rehash, so it does not have to cache the value. This
+ * reduces node's memory by sizeof(size_t). The required recalculation has
+ * a slight performance penalty (around 1.6%), but this is compensated by
+ * memory savings of about 9% which allow for a larger dbcache setting.
+ *
+ * @see https://gcc.gnu.org/onlinedocs/gcc-9.2.0/libstdc++/manual/manual/unordered_associative.html
+ */
+ size_t operator()(const COutPoint& id) const noexcept {
+ return SipHashUint256Extra(k0, k1, id.hash, id.n);
+ }
+};
+
+struct FilterHeaderHasher
+{
+ size_t operator()(const uint256& hash) const { return ReadLE64(hash.begin()); }
+};
+
+/**
+ * We're hashing a nonce into the entries themselves, so we don't need extra
+ * blinding in the set hash computation.
+ *
+ * This may exhibit platform endian dependent behavior but because these are
+ * nonced hashes (random) and this state is only ever used locally it is safe.
+ * All that matters is local consistency.
+ */
+class SignatureCacheHasher
+{
+public:
+ template <uint8_t hash_select>
+ uint32_t operator()(const uint256& key) const
+ {
+ static_assert(hash_select <8, "SignatureCacheHasher only has 8 hashes available.");
+ uint32_t u;
+ std::memcpy(&u, key.begin()+4*hash_select, 4);
+ return u;
+ }
+};
+
+struct BlockHasher
+{
+ // this used to call `GetCheapHash()` in uint256, which was later moved; the
+ // cheap hash function simply calls ReadLE64() however, so the end result is
+ // identical
+ size_t operator()(const uint256& hash) const { return ReadLE64(hash.begin()); }
+};
+
+#endif // BITCOIN_UTIL_HASHER_H
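The salt members k0 and k1 above are initialized by constructors defined in the accompanying util/hasher.cpp, which is not part of this file. As a hedged, self-contained sketch of the same salted-hasher pattern in standard C++: std::random_device stands in for Core's own randomness source, the Key256 type stands in for uint256, and the toy mixing stands in for SipHash, so none of the names below come from the commit itself.

#include <array>
#include <cstdint>
#include <cstring>
#include <random>
#include <unordered_set>

using Key256 = std::array<unsigned char, 32>; // stand-in for uint256

struct SaltedKeyHasher {
    const uint64_t k0, k1; // per-instance salt, fixed at construction time

    // Toy seeding: std::random_device replaces Core's randomness source here.
    SaltedKeyHasher()
        : k0{std::random_device{}()}, k1{std::random_device{}()} {}

    // noexcept matters for libstdc++ (see the SaltedOutpointHasher comment in
    // the diff above): the container recomputes the hash on rehash instead of
    // caching it. The mixing below is a toy; the real hashers run SipHash over
    // all 32 bytes via SipHashUint256/SipHashUint256Extra.
    size_t operator()(const Key256& key) const noexcept
    {
        uint64_t word;
        std::memcpy(&word, key.data(), sizeof(word));
        return static_cast<size_t>(word ^ k0 ^ (k1 << 1));
    }
};

int main()
{
    // Bucket placement differs from run to run because the salt differs, which
    // is the point: colliding keys cannot be precomputed for this process.
    std::unordered_set<Key256, SaltedKeyHasher> seen;
    seen.insert(Key256{});
    return seen.size() == 1 ? 0 : 1;
}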