author     Martin Leitner-Ankerl <martin.ankerl@gmail.com>  2022-06-11 09:23:51 +0200
committer  Martin Leitner-Ankerl <martin.ankerl@gmail.com>  2023-03-23 19:38:38 +0100
commit     b8401c3281978beed6198b2f9782b6a8dd35cbd7 (patch)
tree       fb740536de54eb5bc056b52da7430c9137b0edd2 /src/bench
parent     23056436461a8b3af1a504b9638c48e8c8170652 (diff)
Add pool based memory resource & allocator

A memory resource similar to std::pmr::unsynchronized_pool_resource, but optimized for node-based containers.

Co-Authored-By: Pieter Wuille <pieter@wuille.net>
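For comparison, the standard-library resource this commit takes as its model can be used directly through the std::pmr container aliases. A minimal sketch of that analogous std::pmr usage (not part of this commit, shown only for context):

#include <cstdint>
#include <memory_resource>
#include <unordered_map>

int main()
{
    // Pooled upstream resource; its memory is released as a whole
    // when the resource goes out of scope.
    std::pmr::unsynchronized_pool_resource resource;

    // The pmr container draws all of its node allocations from the resource.
    std::pmr::unordered_map<uint64_t, uint64_t> map{&resource};
    map[42] = 1;
}

The commit's PoolResource/PoolAllocator pair plays the same role as the resource and polymorphic allocator above, but is specialized for the fixed-size node allocations that node-based containers make.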
Diffstat (limited to 'src/bench')
-rw-r--r--  src/bench/pool.cpp  50
1 file changed, 50 insertions(+), 0 deletions(-)
diff --git a/src/bench/pool.cpp b/src/bench/pool.cpp
new file mode 100644
index 0000000000..b3e54d85a2
--- /dev/null
+++ b/src/bench/pool.cpp
@@ -0,0 +1,50 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <bench/bench.h>
+#include <support/allocators/pool.h>
+
+#include <unordered_map>
+
+template <typename Map>
+void BenchFillClearMap(benchmark::Bench& bench, Map& map)
+{
+    size_t batch_size = 5000;
+
+    // make sure each iteration of the benchmark contains exactly 5000 inserts and one clear.
+    // do this at least 10 times so we get reasonably accurate results.
+
+    bench.batch(batch_size).minEpochIterations(10).run([&] {
+        auto rng = ankerl::nanobench::Rng(1234);
+        for (size_t i = 0; i < batch_size; ++i) {
+            map[rng()];
+        }
+        map.clear();
+    });
+}
+
+static void PoolAllocator_StdUnorderedMap(benchmark::Bench& bench)
+{
+    auto map = std::unordered_map<uint64_t, uint64_t>();
+    BenchFillClearMap(bench, map);
+}
+
+static void PoolAllocator_StdUnorderedMapWithPoolResource(benchmark::Bench& bench)
+{
+    using Map = std::unordered_map<uint64_t,
+                                   uint64_t,
+                                   std::hash<uint64_t>,
+                                   std::equal_to<uint64_t>,
+                                   PoolAllocator<std::pair<const uint64_t, uint64_t>,
+                                                 sizeof(std::pair<const uint64_t, uint64_t>) + 4 * sizeof(void*),
+                                                 alignof(void*)>>;
+
+    // make sure the resource supports large enough pools to hold a map node; we do this by adding the size of a few pointers to the element size.
+    auto pool_resource = Map::allocator_type::ResourceType();
+    auto map = Map{0, std::hash<uint64_t>{}, std::equal_to<uint64_t>{}, &pool_resource};
+    BenchFillClearMap(bench, map);
+}
+
+BENCHMARK(PoolAllocator_StdUnorderedMap, benchmark::PriorityLevel::HIGH);
+BENCHMARK(PoolAllocator_StdUnorderedMapWithPoolResource, benchmark::PriorityLevel::HIGH);
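
A note on the allocator's size parameter: the second benchmark instantiates PoolAllocator with sizeof(std::pair<const uint64_t, uint64_t>) + 4 * sizeof(void*) because the map allocates whole nodes rather than bare pairs, and each node carries implementation-defined bookkeeping (a bucket chain pointer and, in some implementations, a cached hash). The headroom of a few pointers covers that overhead. A sketch of the arithmetic under a hypothetical node layout (the real layout is up to the standard library, so this is an assumption, not the actual definition):

#include <cstddef>
#include <cstdint>
#include <utility>

// Hypothetical node layout, for illustration only; the actual layout of an
// unordered_map node is implementation-defined.
struct HypotheticalNode {
    void* next;                                // bucket chain pointer
    std::size_t cached_hash;                   // some implementations cache the hash
    std::pair<const uint64_t, uint64_t> value; // the user-visible element
};

// The benchmark's headroom of 4 pointers comfortably covers this layout.
static_assert(sizeof(HypotheticalNode) <=
                  sizeof(std::pair<const uint64_t, uint64_t>) + 4 * sizeof(void*),
              "pool block size must cover the node");

Since both functions pass the same batch of 5000 inserts and one clear to nanobench via bench.batch(batch_size), the reported per-operation times of the two benchmarks are directly comparable. To run only these two benchmarks, the bench_bitcoin binary's -filter option accepts a regex, e.g. src/bench/bench_bitcoin -filter='PoolAllocator.*'.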