Diffstat (limited to 'src/support')
 src/support/lockedpool.cpp | 72
 src/support/lockedpool.h   | 19
 2 files changed, 60 insertions(+), 31 deletions(-)
diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp
index eae96e1429..51c337ed2f 100644
--- a/src/support/lockedpool.cpp
+++ b/src/support/lockedpool.cpp
@@ -48,7 +48,9 @@ Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
{
// Start with one free chunk that covers the entire arena
- chunks_free.emplace(base, size_in);
+ auto it = size_to_free_chunk.emplace(size_in, base);
+ chunks_free.emplace(base, it);
+ chunks_free_end.emplace(base + size_in, it);
}
Arena::~Arena()
@@ -64,26 +66,30 @@ void* Arena::alloc(size_t size)
if (size == 0)
return nullptr;
- // Pick a large enough free-chunk
- auto it = std::find_if(chunks_free.begin(), chunks_free.end(),
- [=](const std::map<char*, size_t>::value_type& chunk){ return chunk.second >= size; });
- if (it == chunks_free.end())
+ // Pick a large enough free-chunk. Returns an iterator pointing to the first element that is not less than key.
+ // This allocation strategy is best-fit. According to "Dynamic Storage Allocation: A Survey and Critical Review",
+ // Wilson et. al. 1995, http://www.scs.stanford.edu/14wi-cs140/sched/readings/wilson.pdf, best-fit and first-fit
+ // policies seem to work well in practice.
+ auto size_ptr_it = size_to_free_chunk.lower_bound(size);
+ if (size_ptr_it == size_to_free_chunk.end())
return nullptr;
// Create the used-chunk, taking its space from the end of the free-chunk
- auto alloced = chunks_used.emplace(it->first + it->second - size, size).first;
- if (!(it->second -= size))
- chunks_free.erase(it);
- return reinterpret_cast<void*>(alloced->first);
-}
-
-/* extend the Iterator if other begins at its end */
-template <class Iterator, class Pair> bool extend(Iterator it, const Pair& other) {
- if (it->first + it->second == other.first) {
- it->second += other.second;
- return true;
+ const size_t size_remaining = size_ptr_it->first - size;
+ auto alloced = chunks_used.emplace(size_ptr_it->second + size_remaining, size).first;
+ chunks_free_end.erase(size_ptr_it->second + size_ptr_it->first);
+ if (size_ptr_it->first == size) {
+ // whole chunk is used up
+ chunks_free.erase(size_ptr_it->second);
+ } else {
+ // still some memory left in the chunk
+ auto it_remaining = size_to_free_chunk.emplace(size_remaining, size_ptr_it->second);
+ chunks_free[size_ptr_it->second] = it_remaining;
+ chunks_free_end.emplace(size_ptr_it->second + size_remaining, it_remaining);
}
- return false;
+ size_to_free_chunk.erase(size_ptr_it);
+
+ return reinterpret_cast<void*>(alloced->first);
}
void Arena::free(void *ptr)
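For illustration, a minimal standalone sketch of the best-fit lookup that the rewritten alloc() relies on. The chunk sizes and the backing buffer below are hypothetical; only the std::multimap::lower_bound call and the carve-from-the-end arithmetic mirror the patch:

    #include <cstddef>
    #include <map>

    int main()
    {
        std::multimap<std::size_t, char*> size_to_free_chunk; // size -> chunk begin, sorted by size
        static char buf[1024];
        size_to_free_chunk.emplace(64, buf);           // hypothetical free chunks
        size_to_free_chunk.emplace(256, buf + 64);
        size_to_free_chunk.emplace(512, buf + 320);

        const std::size_t size = 100;                  // requested allocation
        // Best fit in O(log(n)): the smallest free chunk whose size is >= the request,
        // here the 256-byte one.
        auto it = size_to_free_chunk.lower_bound(size);
        if (it == size_to_free_chunk.end())
            return 1;                                  // no chunk is large enough
        // As in alloc(), the used chunk is carved from the end of the free chunk.
        char* alloced = it->second + (it->first - size);
        (void)alloced;
        return 0;
    }

This is what replaces the old linear std::find_if scan: the multimap keeps free chunks ordered by size, so finding a suitable chunk is a tree lookup instead of a walk over every free chunk.
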
@@ -98,16 +104,30 @@ void Arena::free(void *ptr)
if (i == chunks_used.end()) {
throw std::runtime_error("Arena: invalid or double free");
}
- auto freed = *i;
+ std::pair<char*, size_t> freed = *i;
chunks_used.erase(i);
- // Add space to free map, coalescing contiguous chunks
- auto next = chunks_free.upper_bound(freed.first);
- auto prev = (next == chunks_free.begin()) ? chunks_free.end() : std::prev(next);
- if (prev == chunks_free.end() || !extend(prev, freed))
- prev = chunks_free.emplace_hint(next, freed);
- if (next != chunks_free.end() && extend(prev, *next))
+ // coalesce freed with previous chunk
+ auto prev = chunks_free_end.find(freed.first);
+ if (prev != chunks_free_end.end()) {
+ freed.first -= prev->second->first;
+ freed.second += prev->second->first;
+ size_to_free_chunk.erase(prev->second);
+ chunks_free_end.erase(prev);
+ }
+
+ // coalesce freed with chunk after freed
+ auto next = chunks_free.find(freed.first + freed.second);
+ if (next != chunks_free.end()) {
+ freed.second += next->second->first;
+ size_to_free_chunk.erase(next->second);
chunks_free.erase(next);
+ }
+
+ // Add/set space with coalesced free chunk
+ auto it = size_to_free_chunk.emplace(freed.second, freed.first);
+ chunks_free[freed.first] = it;
+ chunks_free_end[freed.first + freed.second] = it;
}
Arena::Stats Arena::stats() const
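The least obvious detail of the new free() is that the two operator[] assignments at the end double as cleanup: they overwrite the stale index entries left behind when a neighbouring chunk is merged. Below is a commented sketch of the same bookkeeping, assuming the container typedefs introduced in lockedpool.h; the free-standing function and the file-scope maps exist only for illustration:

    #include <cstddef>
    #include <map>
    #include <unordered_map>

    typedef std::multimap<std::size_t, char*> SizeToChunkSortedMap;
    typedef std::unordered_map<char*, SizeToChunkSortedMap::const_iterator> ChunkToSizeMap;

    static SizeToChunkSortedMap size_to_free_chunk;
    static ChunkToSizeMap chunks_free;     // keyed by chunk begin
    static ChunkToSizeMap chunks_free_end; // keyed by chunk end

    void add_free_chunk(char* begin, std::size_t size)
    {
        // Merge with a free chunk that ends exactly at `begin`, if any.
        auto prev = chunks_free_end.find(begin);
        if (prev != chunks_free_end.end()) {
            begin -= prev->second->first;  // prev->second->first is the previous chunk's size
            size += prev->second->first;
            size_to_free_chunk.erase(prev->second);
            chunks_free_end.erase(prev);
            // prev's entry in chunks_free, keyed by the new `begin`, is overwritten below.
        }
        // Merge with a free chunk that begins exactly at `begin + size`, if any.
        auto next = chunks_free.find(begin + size);
        if (next != chunks_free.end()) {
            size += next->second->first;
            size_to_free_chunk.erase(next->second);
            chunks_free.erase(next);
            // next's entry in chunks_free_end, keyed by the new end, is overwritten below.
        }
        // Register the coalesced chunk; operator[] deliberately replaces any stale
        // iterators left by the merges above.
        auto it = size_to_free_chunk.emplace(size, begin);
        chunks_free[begin] = it;
        chunks_free_end[begin + size] = it;
    }

Compared with the removed extend() helper, which probed neighbours in the sorted chunks_free map, both neighbour lookups here are average O(1) hash lookups, and only the re-insertion into size_to_free_chunk costs O(log(n)).
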
@@ -116,7 +136,7 @@ Arena::Stats Arena::stats() const
for (const auto& chunk: chunks_used)
r.used += chunk.second;
for (const auto& chunk: chunks_free)
- r.free += chunk.second;
+ r.free += chunk.second->first;
r.total = r.used + r.free;
return r;
}
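A quick end-to-end sanity sketch through Arena's public interface, exercising the new alloc, free and stats paths. The buffer size, alignment and allocation sizes are made up, and the include path assumes Bitcoin Core's src/ include root:

    #include <cassert>
    #include "support/lockedpool.h"

    int main()
    {
        static char backing[4096];
        Arena arena(backing, sizeof(backing), 16); // 16-byte alignment, chosen arbitrarily

        void* a = arena.alloc(100);  // best-fit lookup in the single initial free chunk
        void* b = arena.alloc(200);
        assert(a != nullptr && b != nullptr);

        arena.free(a);               // goes back into the three free-chunk maps
        arena.free(b);               // coalesces with the chunk freed above and the arena tail
        assert(arena.stats().used == 0);
        return 0;
    }
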
@@ -185,7 +205,7 @@ void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
size_t Win32LockedPageAllocator::GetLimit()
{
- // TODO is there a limit on windows, how to get it?
+ // TODO is there a limit on Windows, how to get it?
return std::numeric_limits<size_t>::max();
}
#endif
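The header diff below replaces the single sorted chunks_free map with three containers that cross-reference each other. Keeping them consistent is the main correctness burden of the new design, so here is a hypothetical debug helper (not part of the patch) that spells out the invariant they are expected to maintain, using the same typedefs the header introduces:

    #include <cstddef>
    #include <map>
    #include <unordered_map>

    typedef std::multimap<std::size_t, char*> SizeToChunkSortedMap;
    typedef std::unordered_map<char*, SizeToChunkSortedMap::const_iterator> ChunkToSizeMap;

    // Every free chunk should appear exactly once in each container, and the three
    // entries should agree on the chunk's begin address, end address and size.
    bool FreeMapsConsistent(const SizeToChunkSortedMap& size_to_free_chunk,
                            const ChunkToSizeMap& chunks_free,
                            const ChunkToSizeMap& chunks_free_end)
    {
        if (chunks_free.size() != size_to_free_chunk.size()) return false;
        if (chunks_free_end.size() != size_to_free_chunk.size()) return false;
        for (const auto& entry : chunks_free) {
            char* begin = entry.first;
            SizeToChunkSortedMap::const_iterator node = entry.second;
            std::size_t size = node->first;
            if (node->second != begin) return false;                  // size map must point back at begin
            auto end_entry = chunks_free_end.find(begin + size);
            if (end_entry == chunks_free_end.end()) return false;     // end index entry missing
            if (end_entry->second != node) return false;              // end index must reference the same node
        }
        return true;
    }
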
diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h
index fc85e6c73c..ccfae16701 100644
--- a/src/support/lockedpool.h
+++ b/src/support/lockedpool.h
@@ -10,6 +10,7 @@
#include <map>
#include <mutex>
#include <memory>
+#include <unordered_map>
/**
* OS-dependent allocation and deallocation of locked/pinned memory pages.
@@ -88,11 +89,19 @@ public:
*/
bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; }
private:
- /** Map of chunk address to chunk information. This class makes use of the
- * sorted order to merge previous and next chunks during deallocation.
- */
- std::map<char*, size_t> chunks_free;
- std::map<char*, size_t> chunks_used;
+ typedef std::multimap<size_t, char*> SizeToChunkSortedMap;
+ /** Map to enable O(log(n)) best-fit allocation, as it's sorted by size */
+ SizeToChunkSortedMap size_to_free_chunk;
+
+ typedef std::unordered_map<char*, SizeToChunkSortedMap::const_iterator> ChunkToSizeMap;
+ /** Map from begin of free chunk to its node in size_to_free_chunk */
+ ChunkToSizeMap chunks_free;
+ /** Map from end of free chunk to its node in size_to_free_chunk */
+ ChunkToSizeMap chunks_free_end;
+
+ /** Map from begin of used chunk to its size */
+ std::unordered_map<char*, size_t> chunks_used;
+
/** Base address of arena */
char* base;
/** End address of arena */