about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--src/Makefile.am1
-rw-r--r--src/Makefile.test.include1
-rw-r--r--src/test/fuzz/vecdeque.cpp491
-rw-r--r--src/util/vecdeque.h316
4 files changed, 809 insertions, 0 deletions
diff --git a/src/Makefile.am b/src/Makefile.am
index ad37928b4d..87bb2b945c 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -333,6 +333,7 @@ BITCOIN_CORE_H = \
util/translation.h \
util/types.h \
util/ui_change_type.h \
+ util/vecdeque.h \
util/vector.h \
validation.h \
validationinterface.h \
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 742022ca93..8a638ec690 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -397,6 +397,7 @@ test_fuzz_fuzz_SOURCES = \
test/fuzz/utxo_snapshot.cpp \
test/fuzz/utxo_total_supply.cpp \
test/fuzz/validation_load_mempool.cpp \
+ test/fuzz/vecdeque.cpp \
test/fuzz/versionbits.cpp
endif # ENABLE_FUZZ_BINARY
diff --git a/src/test/fuzz/vecdeque.cpp b/src/test/fuzz/vecdeque.cpp
new file mode 100644
index 0000000000..1d9a98931f
--- /dev/null
+++ b/src/test/fuzz/vecdeque.cpp
@@ -0,0 +1,491 @@
+// Copyright (c) The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <span.h>
+#include <test/fuzz/util.h>
+#include <test/util/xoroshiro128plusplus.h>
+#include <util/vecdeque.h>
+
+#include <deque>
+#include <stdint.h>
+
+namespace {
+
+/** The maximum number of simultaneous buffers kept by the test. */
+static constexpr size_t MAX_BUFFERS{3};
+/** How many elements are kept in a buffer at most. */
+static constexpr size_t MAX_BUFFER_SIZE{48};
+/** How many operations are performed at most on the buffers in one test. */
+static constexpr size_t MAX_OPERATIONS{1024};
+
+/** Perform a simulation fuzz test on VecDeque type T.
+ *
+ * T must be constructible from a uint64_t seed, comparable to other T, copyable, and movable.
+ */
template<typename T, bool CheckNoneLeft>
void TestType(Span<const uint8_t> buffer, uint64_t rng_tweak)
{
    FuzzedDataProvider provider(buffer.data(), buffer.size());
    // Local RNG, only used for the seeds to initialize T objects with.
    XoRoShiRo128PlusPlus rng(provider.ConsumeIntegral<uint64_t>() ^ rng_tweak);

    // Real circular buffers.
    std::vector<VecDeque<T>> real;
    real.reserve(MAX_BUFFERS);
    // Simulated circular buffers.
    std::vector<std::deque<T>> sim;
    sim.reserve(MAX_BUFFERS);
    // Temporary object of type T.
    std::optional<T> tmp;

    // Compare a real and a simulated buffer: size, emptiness, capacity invariant, and
    // every element (via front()/back()/operator[]).
    auto compare_fn = [](const VecDeque<T>& r, const std::deque<T>& s) {
        assert(r.size() == s.size());
        assert(r.empty() == s.empty());
        assert(r.capacity() >= r.size());
        if (s.size() == 0) return;
        assert(r.front() == s.front());
        assert(r.back() == s.back());
        for (size_t i = 0; i < s.size(); ++i) {
            assert(r[i] == s[i]);
        }
    };

    LIMITED_WHILE(provider.remaining_bytes(), MAX_OPERATIONS) {
        // 64 exceeds the number of distinct operations below, so the selection loop below
        // may traverse the chain more than once before command reaches 0.
        int command = provider.ConsumeIntegral<uint8_t>() % 64;
        // Buffer to operate on (0 when none exist yet; operations needing a buffer are
        // gated on the applicability flags below).
        unsigned idx = real.empty() ? 0 : provider.ConsumeIntegralInRange<unsigned>(0, real.size() - 1);
        const size_t num_buffers = sim.size();
        // Pick one operation based on value of command. Not all operations are always applicable.
        // Loop through the applicable ones until command reaches 0 (which avoids the need to
        // compute the number of applicable commands ahead of time).
        const bool non_empty{num_buffers != 0};
        const bool non_full{num_buffers < MAX_BUFFERS};
        const bool partially_full{non_empty && non_full};
        const bool multiple_exist{num_buffers > 1};
        const bool existing_buffer_non_full{non_empty && sim[idx].size() < MAX_BUFFER_SIZE};
        const bool existing_buffer_non_empty{non_empty && !sim[idx].empty()};
        // At least one operation must always be applicable, or the loop below would never
        // terminate.
        assert(non_full || non_empty);
        while (true) {
            if (non_full && command-- == 0) {
                /* Default construct. */
                real.emplace_back();
                sim.emplace_back();
                break;
            }
            if (non_empty && command-- == 0) {
                /* resize() */
                compare_fn(real[idx], sim[idx]);
                size_t new_size = provider.ConsumeIntegralInRange<size_t>(0, MAX_BUFFER_SIZE);
                real[idx].resize(new_size);
                sim[idx].resize(new_size);
                assert(real[idx].size() == new_size);
                break;
            }
            if (non_empty && command-- == 0) {
                /* clear() */
                compare_fn(real[idx], sim[idx]);
                real[idx].clear();
                sim[idx].clear();
                assert(real[idx].empty());
                break;
            }
            if (non_empty && command-- == 0) {
                /* Assign from a default-constructed temporary (exercises move-assignment
                 * with an empty source). */
                compare_fn(real[idx], sim[idx]);
                real[idx] = VecDeque<T>();
                sim[idx].clear();
                assert(real[idx].size() == 0);
                break;
            }
            if (non_empty && command-- == 0) {
                /* Destruct. Always removes the last buffer; idx is deliberately unused here. */
                compare_fn(real.back(), sim.back());
                real.pop_back();
                sim.pop_back();
                break;
            }
            if (partially_full && command-- == 0) {
                /* Copy construct. */
                real.emplace_back(real[idx]);
                sim.emplace_back(sim[idx]);
                break;
            }
            if (partially_full && command-- == 0) {
                /* Move construct. */
                VecDeque<T> copy(real[idx]);
                real.emplace_back(std::move(copy));
                sim.emplace_back(sim[idx]);
                break;
            }
            if (multiple_exist && command-- == 0) {
                /* swap() */
                swap(real[idx], real[(idx + 1) % num_buffers]);
                swap(sim[idx], sim[(idx + 1) % num_buffers]);
                break;
            }
            if (multiple_exist && command-- == 0) {
                /* Copy assign. */
                compare_fn(real[idx], sim[idx]);
                real[idx] = real[(idx + 1) % num_buffers];
                sim[idx] = sim[(idx + 1) % num_buffers];
                break;
            }
            if (multiple_exist && command-- == 0) {
                /* Move assign. */
                VecDeque<T> copy(real[(idx + 1) % num_buffers]);
                compare_fn(real[idx], sim[idx]);
                real[idx] = std::move(copy);
                sim[idx] = sim[(idx + 1) % num_buffers];
                break;
            }
            if (non_empty && command-- == 0) {
                /* Self swap() */
                swap(real[idx], real[idx]);
                break;
            }
            if (non_empty && command-- == 0) {
                /* Self-copy assign. */
                real[idx] = real[idx];
                break;
            }
            if (non_empty && command-- == 0) {
                /* Self-move assign. */
                // Do not use std::move(real[idx]) here: -Wself-move correctly warns about that.
                real[idx] = static_cast<VecDeque<T>&&>(real[idx]);
                break;
            }
            if (non_empty && command-- == 0) {
                /* reserve() */
                size_t res_size = provider.ConsumeIntegralInRange<size_t>(0, MAX_BUFFER_SIZE);
                size_t old_cap = real[idx].capacity();
                size_t old_size = real[idx].size();
                real[idx].reserve(res_size);
                assert(real[idx].size() == old_size);
                assert(real[idx].capacity() == std::max(old_cap, res_size));
                break;
            }
            if (non_empty && command-- == 0) {
                /* shrink_to_fit() */
                size_t old_size = real[idx].size();
                real[idx].shrink_to_fit();
                assert(real[idx].size() == old_size);
                assert(real[idx].capacity() == old_size);
                break;
            }
            if (existing_buffer_non_full && command-- == 0) {
                /* push_back() (copying) */
                tmp = T(rng());
                size_t old_size = real[idx].size();
                size_t old_cap = real[idx].capacity();
                real[idx].push_back(*tmp);
                sim[idx].push_back(*tmp);
                assert(real[idx].size() == old_size + 1);
                // The capacity checks below (and in the 5 growing operations that follow)
                // encode VecDeque's growth policy: at most (old_cap + 1) * 2 when full.
                if (old_cap > old_size) {
                    assert(real[idx].capacity() == old_cap);
                } else {
                    assert(real[idx].capacity() > old_cap);
                    assert(real[idx].capacity() <= 2 * (old_cap + 1));
                }
                break;
            }
            if (existing_buffer_non_full && command-- == 0) {
                /* push_back() (moving) */
                tmp = T(rng());
                size_t old_size = real[idx].size();
                size_t old_cap = real[idx].capacity();
                sim[idx].push_back(*tmp);
                real[idx].push_back(std::move(*tmp));
                assert(real[idx].size() == old_size + 1);
                if (old_cap > old_size) {
                    assert(real[idx].capacity() == old_cap);
                } else {
                    assert(real[idx].capacity() > old_cap);
                    assert(real[idx].capacity() <= 2 * (old_cap + 1));
                }
                break;
            }
            if (existing_buffer_non_full && command-- == 0) {
                /* emplace_back() */
                uint64_t seed{rng()};
                size_t old_size = real[idx].size();
                size_t old_cap = real[idx].capacity();
                sim[idx].emplace_back(seed);
                real[idx].emplace_back(seed);
                assert(real[idx].size() == old_size + 1);
                if (old_cap > old_size) {
                    assert(real[idx].capacity() == old_cap);
                } else {
                    assert(real[idx].capacity() > old_cap);
                    assert(real[idx].capacity() <= 2 * (old_cap + 1));
                }
                break;
            }
            if (existing_buffer_non_full && command-- == 0) {
                /* push_front() (copying) */
                tmp = T(rng());
                size_t old_size = real[idx].size();
                size_t old_cap = real[idx].capacity();
                real[idx].push_front(*tmp);
                sim[idx].push_front(*tmp);
                assert(real[idx].size() == old_size + 1);
                if (old_cap > old_size) {
                    assert(real[idx].capacity() == old_cap);
                } else {
                    assert(real[idx].capacity() > old_cap);
                    assert(real[idx].capacity() <= 2 * (old_cap + 1));
                }
                break;
            }
            if (existing_buffer_non_full && command-- == 0) {
                /* push_front() (moving) */
                tmp = T(rng());
                size_t old_size = real[idx].size();
                size_t old_cap = real[idx].capacity();
                sim[idx].push_front(*tmp);
                real[idx].push_front(std::move(*tmp));
                assert(real[idx].size() == old_size + 1);
                if (old_cap > old_size) {
                    assert(real[idx].capacity() == old_cap);
                } else {
                    assert(real[idx].capacity() > old_cap);
                    assert(real[idx].capacity() <= 2 * (old_cap + 1));
                }
                break;
            }
            if (existing_buffer_non_full && command-- == 0) {
                /* emplace_front() */
                uint64_t seed{rng()};
                size_t old_size = real[idx].size();
                size_t old_cap = real[idx].capacity();
                sim[idx].emplace_front(seed);
                real[idx].emplace_front(seed);
                assert(real[idx].size() == old_size + 1);
                if (old_cap > old_size) {
                    assert(real[idx].capacity() == old_cap);
                } else {
                    assert(real[idx].capacity() > old_cap);
                    assert(real[idx].capacity() <= 2 * (old_cap + 1));
                }
                break;
            }
            if (existing_buffer_non_empty && command-- == 0) {
                /* front() [modifying] */
                tmp = T(rng());
                size_t old_size = real[idx].size();
                assert(sim[idx].front() == real[idx].front());
                sim[idx].front() = *tmp;
                real[idx].front() = std::move(*tmp);
                assert(real[idx].size() == old_size);
                break;
            }
            if (existing_buffer_non_empty && command-- == 0) {
                /* back() [modifying] */
                tmp = T(rng());
                size_t old_size = real[idx].size();
                assert(sim[idx].back() == real[idx].back());
                sim[idx].back() = *tmp;
                real[idx].back() = *tmp;
                assert(real[idx].size() == old_size);
                break;
            }
            if (existing_buffer_non_empty && command-- == 0) {
                /* operator[] [modifying] */
                tmp = T(rng());
                size_t pos = provider.ConsumeIntegralInRange<size_t>(0, sim[idx].size() - 1);
                size_t old_size = real[idx].size();
                assert(sim[idx][pos] == real[idx][pos]);
                sim[idx][pos] = *tmp;
                real[idx][pos] = std::move(*tmp);
                assert(real[idx].size() == old_size);
                break;
            }
            if (existing_buffer_non_empty && command-- == 0) {
                /* pop_front() */
                assert(sim[idx].front() == real[idx].front());
                size_t old_size = real[idx].size();
                sim[idx].pop_front();
                real[idx].pop_front();
                assert(real[idx].size() == old_size - 1);
                break;
            }
            if (existing_buffer_non_empty && command-- == 0) {
                /* pop_back() */
                assert(sim[idx].back() == real[idx].back());
                size_t old_size = real[idx].size();
                sim[idx].pop_back();
                real[idx].pop_back();
                assert(real[idx].size() == old_size - 1);
                break;
            }
        }
    }

    /* Fully compare the final state. */
    for (unsigned i = 0; i < sim.size(); ++i) {
        // Make sure const getters work.
        const VecDeque<T>& realbuf = real[i];
        const std::deque<T>& simbuf = sim[i];
        compare_fn(realbuf, simbuf);
        // Verify that equality and three-way comparison between any two real buffers agree
        // with the corresponding comparisons between the simulated ones.
        for (unsigned j = 0; j < sim.size(); ++j) {
            assert((realbuf == real[j]) == (simbuf == sim[j]));
            assert(((realbuf <=> real[j]) >= 0) == (simbuf >= sim[j]));
            assert(((realbuf <=> real[j]) <= 0) == (simbuf <= sim[j]));
        }
        // Clear out the buffers so we can check below that no objects exist anymore.
        sim[i].clear();
        real[i].clear();
    }

    if constexpr (CheckNoneLeft) {
        tmp = std::nullopt;
        T::CheckNoneExist();
    }
}
+
+/** Data structure with built-in tracking of all existing objects. */
+template<size_t Size>
+class TrackedObj
+{
+ static_assert(Size > 0);
+
+ /* Data type for map that actually stores the object data.
+ *
+ * The key is a pointer to the TrackedObj, the value is the uint64_t it was initialized with.
+ * Default-constructed and moved-from objects hold an std::nullopt.
+ */
+ using track_map_type = std::map<const TrackedObj<Size>*, std::optional<uint64_t>>;
+
+private:
+
+ /** Actual map. */
+ static inline track_map_type g_tracker;
+
+ /** Iterators into the tracker map for this object.
+ *
+ * This is an array of size Size, all holding the same value, to give the object configurable
+ * size. The value is g_tracker.end() if this object is not fully initialized. */
+ typename track_map_type::iterator m_track_entry[Size];
+
+ void Check() const
+ {
+ auto it = g_tracker.find(this);
+ for (size_t i = 0; i < Size; ++i) {
+ assert(m_track_entry[i] == it);
+ }
+ }
+
+ /** Create entry for this object in g_tracker and populate m_track_entry. */
+ void Register()
+ {
+ auto [it, inserted] = g_tracker.emplace(this, std::nullopt);
+ assert(inserted);
+ for (size_t i = 0; i < Size; ++i) {
+ m_track_entry[i] = it;
+ }
+ }
+
+ void Deregister()
+ {
+ Check();
+ assert(m_track_entry[0] != g_tracker.end());
+ g_tracker.erase(m_track_entry[0]);
+ for (size_t i = 0; i < Size; ++i) {
+ m_track_entry[i] = g_tracker.end();
+ }
+ }
+
+ /** Get value corresponding to this object in g_tracker. */
+ std::optional<uint64_t>& Deref()
+ {
+ Check();
+ assert(m_track_entry[0] != g_tracker.end());
+ return m_track_entry[0]->second;
+ }
+
+ /** Get value corresponding to this object in g_tracker. */
+ const std::optional<uint64_t>& Deref() const
+ {
+ Check();
+ assert(m_track_entry[0] != g_tracker.end());
+ return m_track_entry[0]->second;
+ }
+
+public:
+ ~TrackedObj() { Deregister(); }
+ TrackedObj() { Register(); }
+
+ TrackedObj(uint64_t value)
+ {
+ Register();
+ Deref() = value;
+ }
+
+ TrackedObj(const TrackedObj& other)
+ {
+ Register();
+ Deref() = other.Deref();
+ }
+
+ TrackedObj(TrackedObj&& other)
+ {
+ Register();
+ Deref() = other.Deref();
+ other.Deref() = std::nullopt;
+ }
+
+ TrackedObj& operator=(const TrackedObj& other)
+ {
+ if (this == &other) return *this;
+ Deref() = other.Deref();
+ return *this;
+ }
+
+ TrackedObj& operator=(TrackedObj&& other)
+ {
+ if (this == &other) return *this;
+ Deref() = other.Deref();
+ other.Deref() = std::nullopt;
+ return *this;
+ }
+
+ friend bool operator==(const TrackedObj& a, const TrackedObj& b)
+ {
+ return a.Deref() == b.Deref();
+ }
+
+ friend std::strong_ordering operator<=>(const TrackedObj& a, const TrackedObj& b)
+ {
+ // Libc++ 15 & 16 do not support std::optional<T>::operator<=> yet. See
+ // https://reviews.llvm.org/D146392.
+ if (!a.Deref().has_value() || !b.Deref().has_value()) {
+ return a.Deref().has_value() <=> b.Deref().has_value();
+ }
+ return *a.Deref() <=> *b.Deref();
+ }
+
+ static void CheckNoneExist()
+ {
+ assert(g_tracker.empty());
+ }
+};
+
+} // namespace
+
FUZZ_TARGET(vecdeque)
{
    // Run the test with simple uints (which satisfy all the trivial properties).
    static_assert(std::is_trivially_copyable_v<uint32_t>);
    static_assert(std::is_trivially_destructible_v<uint64_t>);
    // Distinct rng_tweak values give each element type its own seed stream for the same
    // fuzz input, so the runs do not mirror each other.
    TestType<uint8_t, false>(buffer, 1);
    TestType<uint16_t, false>(buffer, 2);
    TestType<uint32_t, false>(buffer, 3);
    TestType<uint64_t, false>(buffer, 4);

    // Run the test with TrackedObjs (which do not). CheckNoneLeft=true additionally
    // verifies at the end that VecDeque leaked no element objects.
    static_assert(!std::is_trivially_copyable_v<TrackedObj<3>>);
    static_assert(!std::is_trivially_destructible_v<TrackedObj<17>>);
    TestType<TrackedObj<1>, true>(buffer, 5);
    TestType<TrackedObj<3>, true>(buffer, 6);
    TestType<TrackedObj<17>, true>(buffer, 7);
}
diff --git a/src/util/vecdeque.h b/src/util/vecdeque.h
new file mode 100644
index 0000000000..b5e7278473
--- /dev/null
+++ b/src/util/vecdeque.h
@@ -0,0 +1,316 @@
+// Copyright (c) The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_UTIL_VECDEQUE_H
+#define BITCOIN_UTIL_VECDEQUE_H
+
#include <util/check.h>

#include <algorithm>
#include <compare>
#include <cstddef>
#include <cstring>
#include <memory>
#include <utility>
+
+/** Data structure largely mimicking std::deque, but using single preallocated ring buffer.
+ *
+ * - More efficient and better memory locality than std::deque.
+ * - Most operations ({push_,pop_,emplace_,}{front,back}(), operator[], ...) are O(1),
+ * unless reallocation is needed (in which case they are O(n)).
+ * - Supports reserve(), capacity(), shrink_to_fit() like vectors.
+ * - No iterator support.
+ * - Data is not stored in a single contiguous block, so no data().
+ */
+template<typename T>
+class VecDeque
+{
+ /** Pointer to allocated memory. Can contain constructed and uninitialized T objects. */
+ T* m_buffer{nullptr};
+ /** m_buffer + m_offset points to first object in queue. m_offset = 0 if m_capacity is 0;
+ * otherwise 0 <= m_offset < m_capacity. */
+ size_t m_offset{0};
+ /** Number of objects in the container. 0 <= m_size <= m_capacity. */
+ size_t m_size{0};
+ /** The size of m_buffer, expressed as a multiple of the size of T. */
+ size_t m_capacity{0};
+
+ /** Returns the number of populated objects between m_offset and the end of the buffer. */
+ size_t FirstPart() const noexcept { return std::min(m_capacity - m_offset, m_size); }
+
+ void Reallocate(size_t capacity)
+ {
+ Assume(capacity >= m_size);
+ Assume((m_offset == 0 && m_capacity == 0) || m_offset < m_capacity);
+ // Allocate new buffer.
+ T* new_buffer = capacity ? std::allocator<T>().allocate(capacity) : nullptr;
+ if (capacity) {
+ if constexpr (std::is_trivially_copyable_v<T>) {
+ // When T is trivially copyable, just copy the data over from old to new buffer.
+ size_t first_part = FirstPart();
+ if (first_part != 0) {
+ std::memcpy(new_buffer, m_buffer + m_offset, first_part * sizeof(T));
+ }
+ if (first_part != m_size) {
+ std::memcpy(new_buffer + first_part, m_buffer, (m_size - first_part) * sizeof(T));
+ }
+ } else {
+ // Otherwise move-construct in place in the new buffer, and destroy old buffer objects.
+ size_t old_pos = m_offset;
+ for (size_t new_pos = 0; new_pos < m_size; ++new_pos) {
+ std::construct_at(new_buffer + new_pos, std::move(*(m_buffer + old_pos)));
+ std::destroy_at(m_buffer + old_pos);
+ ++old_pos;
+ if (old_pos == m_capacity) old_pos = 0;
+ }
+ }
+ }
+ // Deallocate old buffer and update housekeeping.
+ std::allocator<T>().deallocate(m_buffer, m_capacity);
+ m_buffer = new_buffer;
+ m_offset = 0;
+ m_capacity = capacity;
+ Assume((m_offset == 0 && m_capacity == 0) || m_offset < m_capacity);
+ }
+
+ /** What index in the buffer does logical entry number pos have? */
+ size_t BufferIndex(size_t pos) const noexcept
+ {
+ Assume(pos < m_capacity);
+ // The expression below is used instead of the more obvious (pos + m_offset >= m_capacity),
+ // because the addition there could in theory overflow with very large deques.
+ if (pos >= m_capacity - m_offset) {
+ return (m_offset + pos) - m_capacity;
+ } else {
+ return m_offset + pos;
+ }
+ }
+
+ /** Specialization of resize() that can only shrink. Separate so that clear() can call it
+ * without requiring a default T constructor. */
+ void ResizeDown(size_t size) noexcept
+ {
+ Assume(size <= m_size);
+ if constexpr (std::is_trivially_destructible_v<T>) {
+ // If T is trivially destructible, we do not need to do anything but update the
+ // housekeeping record. Default constructor or zero-filling will be used when
+ // the space is reused.
+ m_size = size;
+ } else {
+ // If not, we need to invoke the destructor for every element separately.
+ while (m_size > size) {
+ std::destroy_at(m_buffer + BufferIndex(m_size - 1));
+ --m_size;
+ }
+ }
+ }
+
+public:
+ VecDeque() noexcept = default;
+
+ /** Resize the deque to be exactly size size (adding default-constructed elements if needed). */
+ void resize(size_t size)
+ {
+ if (size < m_size) {
+ // Delegate to ResizeDown when shrinking.
+ ResizeDown(size);
+ } else if (size > m_size) {
+ // When growing, first see if we need to allocate more space.
+ if (size > m_capacity) Reallocate(size);
+ while (m_size < size) {
+ std::construct_at(m_buffer + BufferIndex(m_size));
+ ++m_size;
+ }
+ }
+ }
+
+ /** Resize the deque to be size 0. The capacity will remain unchanged. */
+ void clear() noexcept { ResizeDown(0); }
+
+ /** Destroy a deque. */
+ ~VecDeque()
+ {
+ clear();
+ Reallocate(0);
+ }
+
+ /** Copy-assign a deque. */
+ VecDeque& operator=(const VecDeque& other)
+ {
+ if (&other == this) [[unlikely]] return *this;
+ clear();
+ Reallocate(other.m_size);
+ if constexpr (std::is_trivially_copyable_v<T>) {
+ size_t first_part = other.FirstPart();
+ Assume(first_part > 0 || m_size == 0);
+ if (first_part != 0) {
+ std::memcpy(m_buffer, other.m_buffer + other.m_offset, first_part * sizeof(T));
+ }
+ if (first_part != other.m_size) {
+ std::memcpy(m_buffer + first_part, other.m_buffer, (other.m_size - first_part) * sizeof(T));
+ }
+ m_size = other.m_size;
+ } else {
+ while (m_size < other.m_size) {
+ std::construct_at(m_buffer + BufferIndex(m_size), other[m_size]);
+ ++m_size;
+ }
+ }
+ return *this;
+ }
+
+ /** Swap two deques. */
+ void swap(VecDeque& other) noexcept
+ {
+ std::swap(m_buffer, other.m_buffer);
+ std::swap(m_offset, other.m_offset);
+ std::swap(m_size, other.m_size);
+ std::swap(m_capacity, other.m_capacity);
+ }
+
+ /** Non-member version of swap. */
+ friend void swap(VecDeque& a, VecDeque& b) noexcept { a.swap(b); }
+
+ /** Move-assign a deque. */
+ VecDeque& operator=(VecDeque&& other) noexcept
+ {
+ swap(other);
+ return *this;
+ }
+
+ /** Copy-construct a deque. */
+ VecDeque(const VecDeque& other) { *this = other; }
+ /** Move-construct a deque. */
+ VecDeque(VecDeque&& other) noexcept { swap(other); }
+
+ /** Equality comparison between two deques (only compares size+contents, not capacity). */
+ bool friend operator==(const VecDeque& a, const VecDeque& b)
+ {
+ if (a.m_size != b.m_size) return false;
+ for (size_t i = 0; i < a.m_size; ++i) {
+ if (a[i] != b[i]) return false;
+ }
+ return true;
+ }
+
+ /** Comparison between two deques, implementing lexicographic ordering on the contents. */
+ std::strong_ordering friend operator<=>(const VecDeque& a, const VecDeque& b)
+ {
+ size_t pos_a{0}, pos_b{0};
+ while (pos_a < a.m_size && pos_b < b.m_size) {
+ auto cmp = a[pos_a++] <=> b[pos_b++];
+ if (cmp != 0) return cmp;
+ }
+ return a.m_size <=> b.m_size;
+ }
+
+ /** Increase the capacity to capacity. Capacity will not shrink. */
+ void reserve(size_t capacity)
+ {
+ if (capacity > m_capacity) Reallocate(capacity);
+ }
+
+ /** Make the capacity equal to the size. The contents does not change. */
+ void shrink_to_fit()
+ {
+ if (m_capacity > m_size) Reallocate(m_size);
+ }
+
+ /** Construct a new element at the end of the deque. */
+ template<typename... Args>
+ void emplace_back(Args&&... args)
+ {
+ if (m_size == m_capacity) Reallocate((m_size + 1) * 2);
+ std::construct_at(m_buffer + BufferIndex(m_size), std::forward<Args>(args)...);
+ ++m_size;
+ }
+
+ /** Move-construct a new element at the end of the deque. */
+ void push_back(T&& elem) { emplace_back(std::move(elem)); }
+
+ /** Copy-construct a new element at the end of the deque. */
+ void push_back(const T& elem) { emplace_back(elem); }
+
+ /** Construct a new element at the beginning of the deque. */
+ template<typename... Args>
+ void emplace_front(Args&&... args)
+ {
+ if (m_size == m_capacity) Reallocate((m_size + 1) * 2);
+ std::construct_at(m_buffer + BufferIndex(m_capacity - 1), std::forward<Args>(args)...);
+ if (m_offset == 0) m_offset = m_capacity;
+ --m_offset;
+ ++m_size;
+ }
+
+ /** Copy-construct a new element at the beginning of the deque. */
+ void push_front(const T& elem) { emplace_front(elem); }
+
+ /** Move-construct a new element at the beginning of the deque. */
+ void push_front(T&& elem) { emplace_front(std::move(elem)); }
+
+ /** Remove the first element of the deque. Requires !empty(). */
+ void pop_front()
+ {
+ Assume(m_size);
+ std::destroy_at(m_buffer + m_offset);
+ --m_size;
+ ++m_offset;
+ if (m_offset == m_capacity) m_offset = 0;
+ }
+
+ /** Remove the last element of the deque. Requires !empty(). */
+ void pop_back()
+ {
+ Assume(m_size);
+ std::destroy_at(m_buffer + BufferIndex(m_size - 1));
+ --m_size;
+ }
+
+ /** Get a mutable reference to the first element of the deque. Requires !empty(). */
+ T& front() noexcept
+ {
+ Assume(m_size);
+ return m_buffer[m_offset];
+ }
+
+ /** Get a const reference to the first element of the deque. Requires !empty(). */
+ const T& front() const noexcept
+ {
+ Assume(m_size);
+ return m_buffer[m_offset];
+ }
+
+ /** Get a mutable reference to the last element of the deque. Requires !empty(). */
+ T& back() noexcept
+ {
+ Assume(m_size);
+ return m_buffer[BufferIndex(m_size - 1)];
+ }
+
+ /** Get a const reference to the last element of the deque. Requires !empty(). */
+ const T& back() const noexcept
+ {
+ Assume(m_size);
+ return m_buffer[BufferIndex(m_size - 1)];
+ }
+
+ /** Get a mutable reference to the element in the deque at the given index. Requires idx < size(). */
+ T& operator[](size_t idx) noexcept
+ {
+ Assume(idx < m_size);
+ return m_buffer[BufferIndex(idx)];
+ }
+
+ /** Get a const reference to the element in the deque at the given index. Requires idx < size(). */
+ const T& operator[](size_t idx) const noexcept
+ {
+ Assume(idx < m_size);
+ return m_buffer[BufferIndex(idx)];
+ }
+
+ /** Test whether the contents of this deque is empty. */
+ bool empty() const noexcept { return m_size == 0; }
+ /** Get the number of elements in this deque. */
+ size_t size() const noexcept { return m_size; }
+ /** Get the capacity of this deque (maximum size it can have without reallocating). */
+ size_t capacity() const noexcept { return m_capacity; }
+};
+
+#endif // BITCOIN_UTIL_VECDEQUE_H