 src/support/lockedpool.cpp   | 127
 src/support/lockedpool.h     |  24
 src/test/allocator_tests.cpp |  14
 3 files changed, 75 insertions(+), 90 deletions(-)
diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp
index 813869a131..01273c9791 100644
--- a/src/support/lockedpool.cpp
+++ b/src/support/lockedpool.cpp
@@ -26,6 +26,8 @@
#include <unistd.h> // for sysconf
#endif
+#include <algorithm>
+
LockedPoolManager* LockedPoolManager::_instance = NULL;
std::once_flag LockedPoolManager::init_flag;
@@ -45,7 +47,7 @@ Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
{
// Start with one free chunk that covers the entire arena
- chunks.emplace(base, Chunk(size_in, false));
+ chunks_free.emplace(base, size_in);
}
Arena::~Arena()
@@ -57,24 +59,30 @@ void* Arena::alloc(size_t size)
// Round to next multiple of alignment
size = align_up(size, alignment);
- // Don't handle zero-sized chunks, or those bigger than MAX_SIZE
- if (size == 0 || size >= Chunk::MAX_SIZE) {
+ // Don't handle zero-sized chunks
+ if (size == 0)
return nullptr;
- }
- for (auto& chunk: chunks) {
- if (!chunk.second.isInUse() && size <= chunk.second.getSize()) {
- char* _base = chunk.first;
- size_t leftover = chunk.second.getSize() - size;
- if (leftover > 0) { // Split chunk
- chunks.emplace(_base + size, Chunk(leftover, false));
- chunk.second.setSize(size);
- }
- chunk.second.setInUse(true);
- return reinterpret_cast<void*>(_base);
- }
+ // Pick a large enough free-chunk
+ auto it = std::find_if(chunks_free.begin(), chunks_free.end(),
+ [=](const std::map<char*, size_t>::value_type& chunk){ return chunk.second >= size; });
+ if (it == chunks_free.end())
+ return nullptr;
+
+ // Create the used-chunk, taking its space from the end of the free-chunk
+ auto alloced = chunks_used.emplace(it->first + it->second - size, size).first;
+ if (!(it->second -= size))
+ chunks_free.erase(it);
+ return reinterpret_cast<void*>(alloced->first);
+}
+
+/* extend the Iterator if other begins at its end */
+template <class Iterator, class Pair> bool extend(Iterator it, const Pair& other) {
+ if (it->first + it->second == other.first) {
+ it->second += other.second;
+ return true;
}
- return nullptr;
+ return false;
}
void Arena::free(void *ptr)
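The rewritten alloc() replaces the single tagged-chunk map with two plain address-to-size maps. A free chunk is chosen first-fit via std::find_if, and the allocation is carved from the end of that chunk, so the chunk's map key (its base address) never changes; only its size shrinks, and the entry is erased once it hits zero. A toy sketch of the same strategy, with alignment handling omitted and standalone names rather than the actual Arena interface:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <map>

    static std::map<char*, size_t> chunks_free;
    static std::map<char*, size_t> chunks_used;

    // First fit: take `size` bytes from the end of the first free chunk
    // that is large enough, so the free chunk keeps its map key.
    static void* toy_alloc(size_t size)
    {
        auto it = std::find_if(chunks_free.begin(), chunks_free.end(),
            [=](const std::pair<char* const, size_t>& chunk) { return chunk.second >= size; });
        if (it == chunks_free.end())
            return nullptr;
        auto alloced = chunks_used.emplace(it->first + it->second - size, size).first;
        if ((it->second -= size) == 0)
            chunks_free.erase(it);
        return alloced->first;
    }

    int main()
    {
        static char arena[1024];
        chunks_free.emplace(arena, sizeof(arena));
        void* p = toy_alloc(256);
        assert(p == arena + 768);                    // carved from the end
        assert(chunks_free.begin()->second == 768);  // base chunk only shrank
    }

Carving from the end is also why the "first allocation must start at beginning" assertions disappear from the tests below.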
@@ -83,65 +91,49 @@ void Arena::free(void *ptr)
if (ptr == nullptr) {
return;
}
- auto i = chunks.find(static_cast<char*>(ptr));
- if (i == chunks.end() || !i->second.isInUse()) {
- throw std::runtime_error("Arena: invalid or double free");
- }
- i->second.setInUse(false);
-
- if (i != chunks.begin()) { // Absorb into previous chunk if exists and free
- auto prev = i;
- --prev;
- if (!prev->second.isInUse()) {
- // Absorb current chunk size into previous chunk.
- prev->second.setSize(prev->second.getSize() + i->second.getSize());
- // Erase current chunk. Erasing does not invalidate current
- // iterators for a map, except for that pointing to the object
- // itself, which will be overwritten in the next statement.
- chunks.erase(i);
- // From here on, the previous chunk is our current chunk.
- i = prev;
- }
- }
- auto next = i;
- ++next;
- if (next != chunks.end()) { // Absorb next chunk if exists and free
- if (!next->second.isInUse()) {
- // Absurb next chunk size into current chunk
- i->second.setSize(i->second.getSize() + next->second.getSize());
- // Erase next chunk.
- chunks.erase(next);
- }
+ // Remove chunk from used map
+ auto i = chunks_used.find(static_cast<char*>(ptr));
+ if (i == chunks_used.end()) {
+ throw std::runtime_error("Arena: invalid or double free");
}
+ auto freed = *i;
+ chunks_used.erase(i);
+
+ // Add space to free map, coalescing contiguous chunks
+ auto next = chunks_free.upper_bound(freed.first);
+ auto prev = (next == chunks_free.begin()) ? chunks_free.end() : std::prev(next);
+ if (prev == chunks_free.end() || !extend(prev, freed))
+ prev = chunks_free.emplace_hint(next, freed);
+ if (next != chunks_free.end() && extend(prev, *next))
+ chunks_free.erase(next);
}
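free() is now two logarithmic steps: remove the chunk from chunks_used, then splice it into chunks_free, merging with the neighbour on either side whenever they are contiguous. upper_bound() finds the first free chunk starting above the freed address, and the entry just before it, if any, is the only candidate for a backward merge via extend(). A standalone sketch of that coalescing path, again with toy names:

    #include <cstddef>
    #include <iterator>
    #include <map>
    #include <stdexcept>
    #include <utility>

    // Grow the chunk at `it` by `other` if `other` starts exactly where it ends.
    template <class Iterator, class Pair>
    static bool extend(Iterator it, const Pair& other)
    {
        if (it->first + it->second == other.first) {
            it->second += other.second;
            return true;
        }
        return false;
    }

    static void toy_free(std::map<char*, size_t>& chunks_used,
                         std::map<char*, size_t>& chunks_free, void* ptr)
    {
        auto i = chunks_used.find(static_cast<char*>(ptr));
        if (i == chunks_used.end())
            throw std::runtime_error("invalid or double free");
        std::pair<char*, size_t> freed = *i;
        chunks_used.erase(i);

        // `next` and `prev` are the free chunks adjacent to `freed` in address order.
        auto next = chunks_free.upper_bound(freed.first);
        auto prev = (next == chunks_free.begin()) ? chunks_free.end() : std::prev(next);
        if (prev == chunks_free.end() || !extend(prev, freed))
            prev = chunks_free.emplace_hint(next, freed);  // no backward merge: insert
        if (next != chunks_free.end() && extend(prev, *next))
            chunks_free.erase(next);                       // forward merge: drop `next`
    }

    int main()
    {
        static char arena[1024];
        std::map<char*, size_t> chunks_used, chunks_free;
        chunks_free.emplace(arena, 256);        // free space below...
        chunks_used.emplace(arena + 256, 512);  // ...a used chunk in the middle...
        chunks_free.emplace(arena + 768, 256);  // ...and free space above it
        toy_free(chunks_used, chunks_free, arena + 256);
        // All three pieces coalesce back into one 1024-byte free chunk.
        return (chunks_free.size() == 1 && chunks_free.begin()->second == 1024) ? 0 : 1;
    }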
Arena::Stats Arena::stats() const
{
- Arena::Stats r;
- r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0;
- for (const auto& chunk: chunks) {
- if (chunk.second.isInUse()) {
- r.used += chunk.second.getSize();
- r.chunks_used += 1;
- } else {
- r.free += chunk.second.getSize();
- r.chunks_free += 1;
- }
- r.total += chunk.second.getSize();
- }
+ Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() };
+ for (const auto& chunk: chunks_used)
+ r.used += chunk.second;
+ for (const auto& chunk: chunks_free)
+ r.free += chunk.second;
+ r.total = r.used + r.free;
return r;
}
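The positional initializer above depends on the declaration order of Arena::Stats. For reference, the layout it assumes, as declared in lockedpool.h, is:

    struct Stats
    {
        size_t used;        // bytes in used chunks
        size_t free;        // bytes in free chunks
        size_t total;       // used + free
        size_t chunks_used; // number of used chunks
        size_t chunks_free; // number of free chunks
    };

Reordering those members would silently reshuffle the statistics, which is the usual cost of positional aggregate initialization.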
#ifdef ARENA_DEBUG
+void printchunk(char* base, size_t sz, bool used) {
+ std::cout <<
+ "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
+ " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
+ " 0x" << used << std::endl;
+}
void Arena::walk() const
{
- for (const auto& chunk: chunks) {
- std::cout <<
- "0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.first <<
- " 0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.second.getSize() <<
- " 0x" << chunk.second.isInUse() << std::endl;
- }
+ for (const auto& chunk: chunks_used)
+ printchunk(chunk.first, chunk.second, true);
+ std::cout << std::endl;
+ for (const auto& chunk: chunks_free)
+ printchunk(chunk.first, chunk.second, false);
std::cout << std::endl;
}
#endif
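One C++ pitfall worth flagging in printchunk(): inserting a char* into an ostream prints the characters it points at, as a NUL-terminated string, not the pointer value, and std::hex/std::setw do not change that. Printing the address itself requires a cast to void*, for example:

    #include <iostream>

    int main()
    {
        char buf[] = "hello";
        char* base = buf;
        std::cout << base << '\n';                      // prints "hello"
        std::cout << static_cast<void*>(base) << '\n';  // prints the address
    }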
@@ -276,6 +268,11 @@ LockedPool::~LockedPool()
void* LockedPool::alloc(size_t size)
{
std::lock_guard<std::mutex> lock(mutex);
+
+ // Don't handle impossible sizes
+ if (size == 0 || size > ARENA_SIZE)
+ return nullptr;
+
// Try allocating from each current arena
for (auto &arena: arenas) {
void *addr = arena.alloc(size);
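With Chunk::MAX_SIZE gone, Arena no longer caps request sizes, so LockedPool::alloc() takes over the check: a request of zero bytes, or of more than ARENA_SIZE bytes, can never be satisfied and is refused before any arena is probed or created. A toy version of the guard (the constant here stands in for LockedPool::ARENA_SIZE, 256 KiB in lockedpool.h):

    #include <cassert>
    #include <cstddef>

    static const size_t ARENA_SIZE = 256 * 1024;  // stand-in for LockedPool::ARENA_SIZE

    // Impossible sizes are refused up front, before touching any arena.
    static bool request_is_possible(size_t size)
    {
        return size != 0 && size <= ARENA_SIZE;
    }

    int main()
    {
        assert(!request_is_possible(0));
        assert(!request_is_possible(ARENA_SIZE + 1));
        assert(request_is_possible(ARENA_SIZE));  // an exact fit may still try the arenas
    }

The mock test added to allocator_tests.cpp below exercises exactly this boundary.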
@@ -307,9 +304,7 @@ void LockedPool::free(void *ptr)
LockedPool::Stats LockedPool::stats() const
{
std::lock_guard<std::mutex> lock(mutex);
- LockedPool::Stats r;
- r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0;
- r.locked = cumulative_bytes_locked;
+ LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
for (const auto &arena: arenas) {
Arena::Stats i = arena.stats();
r.used += i.used;
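The same positional-initializer caveat as in Arena::stats() applies here; the assumed LockedPool::Stats layout, per lockedpool.h, is:

    struct Stats
    {
        size_t used;
        size_t free;
        size_t total;
        size_t locked;      // the slot receiving cumulative_bytes_locked above
        size_t chunks_used;
        size_t chunks_free;
    };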
diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h
index 526c17a73f..3403415436 100644
--- a/src/support/lockedpool.h
+++ b/src/support/lockedpool.h
@@ -50,27 +50,6 @@ public:
Arena(void *base, size_t size, size_t alignment);
virtual ~Arena();
- /** A chunk of memory.
- */
- struct Chunk
- {
- /** Most significant bit of size_t. This is used to mark
- * in-usedness of chunk.
- */
- const static size_t SIZE_MSB = 1LLU << ((sizeof(size_t)*8)-1);
- /** Maximum size of a chunk */
- const static size_t MAX_SIZE = SIZE_MSB - 1;
-
- Chunk(size_t size_in, bool used_in):
- size(size_in | (used_in ? SIZE_MSB : 0)) {}
-
- bool isInUse() const { return size & SIZE_MSB; }
- void setInUse(bool used_in) { size = (size & ~SIZE_MSB) | (used_in ? SIZE_MSB : 0); }
- size_t getSize() const { return size & ~SIZE_MSB; }
- void setSize(size_t size_in) { size = (size & SIZE_MSB) | size_in; }
- private:
- size_t size;
- };
/** Memory statistics. */
struct Stats
{
@@ -112,7 +91,8 @@ private:
/** Map of chunk address to chunk information. This class makes use of the
* sorted order to merge previous and next chunks during deallocation.
*/
- std::map<char*, Chunk> chunks;
+ std::map<char*, size_t> chunks_free;
+ std::map<char*, size_t> chunks_used;
/** Base address of arena */
char* base;
/** End address of arena */
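The deleted Chunk struct packed the in-use flag into the most significant bit of the size field, which is what forced the MAX_SIZE cap and the masking accessors. With separate used and free maps, map membership carries that flag instead, the full size_t range becomes usable, and the accessors disappear. The two encodings side by side, in toy form:

    #include <cstddef>
    #include <map>

    // Old scheme: one map, with the in-use flag packed into the size's top bit.
    static const size_t SIZE_MSB = size_t(1) << (sizeof(size_t) * 8 - 1);
    static bool in_use_tagged(size_t tagged) { return (tagged & SIZE_MSB) != 0; }
    static size_t size_tagged(size_t tagged) { return tagged & ~SIZE_MSB; }

    // New scheme: two maps; a chunk is in use iff chunks_used holds it.
    static bool in_use_split(const std::map<char*, size_t>& chunks_used, char* ptr)
    {
        return chunks_used.count(ptr) != 0;
    }

    int main()
    {
        static char block[128];
        std::map<char*, size_t> chunks_used;
        chunks_used.emplace(block, sizeof(block));

        size_t tagged = sizeof(block) | SIZE_MSB;  // old encoding of the same chunk
        bool same = in_use_tagged(tagged) && size_tagged(tagged) == sizeof(block)
                 && in_use_split(chunks_used, block);
        return same ? 0 : 1;
    }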
diff --git a/src/test/allocator_tests.cpp b/src/test/allocator_tests.cpp
index f0e848655f..77e9df5d82 100644
--- a/src/test/allocator_tests.cpp
+++ b/src/test/allocator_tests.cpp
@@ -39,7 +39,6 @@ BOOST_AUTO_TEST_CASE(arena_tests)
}
void *a0 = b.alloc(128);
- BOOST_CHECK(a0 == synth_base); // first allocation must start at beginning
void *a1 = b.alloc(256);
void *a2 = b.alloc(512);
BOOST_CHECK(b.stats().used == 896);
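The removed position check is deliberate rather than collateral: because the rewritten alloc() takes space from the end of the first large-enough free chunk, the first allocation no longer lands at the arena base. The matching assertion in the sweeping-allocation test further down is dropped for the same reason.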
@@ -63,8 +62,10 @@ BOOST_AUTO_TEST_CASE(arena_tests)
BOOST_CHECK(b.stats().used == 128);
b.free(a3);
BOOST_CHECK(b.stats().used == 0);
+ BOOST_CHECK_EQUAL(b.stats().chunks_used, 0);
BOOST_CHECK(b.stats().total == synth_size);
BOOST_CHECK(b.stats().free == synth_size);
+ BOOST_CHECK_EQUAL(b.stats().chunks_free, 1);
std::vector<void*> addr;
BOOST_CHECK(b.alloc(0) == nullptr); // allocating 0 always returns nullptr
@@ -74,7 +75,6 @@ BOOST_AUTO_TEST_CASE(arena_tests)
// Sweeping allocate all memory
for (int x=0; x<1024; ++x)
addr.push_back(b.alloc(1024));
- BOOST_CHECK(addr[0] == synth_base); // first allocation must start at beginning
BOOST_CHECK(b.stats().free == 0);
BOOST_CHECK(b.alloc(1024) == nullptr); // memory is full, this must return nullptr
BOOST_CHECK(b.alloc(0) == nullptr);
@@ -166,6 +166,16 @@ BOOST_AUTO_TEST_CASE(lockedpool_tests_mock)
BOOST_CHECK(pool.stats().total == 0);
BOOST_CHECK(pool.stats().locked == 0);
+ // Ensure unreasonable requests are refused without allocating anything
+ void *invalid_toosmall = pool.alloc(0);
+ BOOST_CHECK(invalid_toosmall == nullptr);
+ BOOST_CHECK(pool.stats().used == 0);
+ BOOST_CHECK(pool.stats().free == 0);
+ void *invalid_toobig = pool.alloc(LockedPool::ARENA_SIZE+1);
+ BOOST_CHECK(invalid_toobig == nullptr);
+ BOOST_CHECK(pool.stats().used == 0);
+ BOOST_CHECK(pool.stats().free == 0);
+
void *a0 = pool.alloc(LockedPool::ARENA_SIZE / 2);
BOOST_CHECK(a0);
BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE);