author    Pieter Wuille <pieter.wuille@gmail.com>    2014-09-19 19:21:46 +0200
committer Pieter Wuille <pieter.wuille@gmail.com>    2014-09-19 19:21:46 +0200
commit    20e01b1a03819d843a860284033b48a5e3b65ff7 (patch)
tree      5b390722b053ce5b448919bda2695d173980ffb5 /src/allocators.h
parent    2fc6c67400e91846ca1c1c57011e57491013f9bd (diff)
Apply clang-format on some infrequently-updated files
Diffstat (limited to 'src/allocators.h')
-rw-r--r--    src/allocators.h    90
1 file changed, 49 insertions, 41 deletions
diff --git a/src/allocators.h b/src/allocators.h
index 65a7d08987..6b69e7ae69 100644
--- a/src/allocators.h
+++ b/src/allocators.h
@@ -26,14 +26,14 @@
* small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
* something like an interval tree would be the preferred data structure.
*/
-template <class Locker> class LockedPageManagerBase
+template <class Locker>
+class LockedPageManagerBase
{
public:
- LockedPageManagerBase(size_t page_size):
- page_size(page_size)
+ LockedPageManagerBase(size_t page_size) : page_size(page_size)
{
// Determine bitmask for extracting page from address
- assert(!(page_size & (page_size-1))); // size must be power of two
+ assert(!(page_size & (page_size - 1))); // size must be power of two
page_mask = ~(page_size - 1);
}
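Aside (not part of the diff): the assert above is what makes the mask trick valid. When page_size is a power of two, page_size - 1 is a block of low one-bits, so ~(page_size - 1) clears exactly the offset-within-page bits of an address. A minimal standalone sketch of the same arithmetic, assuming 4 KiB pages and a hypothetical address:

    #include <cassert>
    #include <cstddef>

    int main()
    {
        const std::size_t page_size = 4096;              // must be a power of two
        const std::size_t page_mask = ~(page_size - 1);  // ...111111000000000000 in binary
        const std::size_t addr = 0x12345;                // hypothetical address
        assert((addr & page_mask) == 0x12000);           // rounded down to its page base
        assert(((addr + 100 - 1) & page_mask) == 0x12000); // last page touched by a 100-byte range
        return 0;
    }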
@@ -44,22 +44,21 @@ public:
// For all pages in affected range, increase lock count
- void LockRange(void *p, size_t size)
+ void LockRange(void* p, size_t size)
{
boost::mutex::scoped_lock lock(mutex);
- if(!size) return;
+ if (!size)
+ return;
const size_t base_addr = reinterpret_cast<size_t>(p);
const size_t start_page = base_addr & page_mask;
const size_t end_page = (base_addr + size - 1) & page_mask;
- for(size_t page = start_page; page <= end_page; page += page_size)
- {
+ for (size_t page = start_page; page <= end_page; page += page_size) {
Histogram::iterator it = histogram.find(page);
- if(it == histogram.end()) // Newly locked page
+ if (it == histogram.end()) // Newly locked page
{
locker.Lock(reinterpret_cast<void*>(page), page_size);
histogram.insert(std::make_pair(page, 1));
- }
- else // Page was already locked; increase counter
+ } else // Page was already locked; increase counter
{
it->second += 1;
}
@@ -67,20 +66,20 @@ public:
}
// For all pages in affected range, decrease lock count
- void UnlockRange(void *p, size_t size)
+ void UnlockRange(void* p, size_t size)
{
boost::mutex::scoped_lock lock(mutex);
- if(!size) return;
+ if (!size)
+ return;
const size_t base_addr = reinterpret_cast<size_t>(p);
const size_t start_page = base_addr & page_mask;
const size_t end_page = (base_addr + size - 1) & page_mask;
- for(size_t page = start_page; page <= end_page; page += page_size)
- {
+ for (size_t page = start_page; page <= end_page; page += page_size) {
Histogram::iterator it = histogram.find(page);
assert(it != histogram.end()); // Cannot unlock an area that was not locked
// Decrease counter for page, when it is zero, the page will be unlocked
it->second -= 1;
- if(it->second == 0) // Nothing on the page anymore that keeps it locked
+ if (it->second == 0) // Nothing on the page anymore that keeps it locked
{
// Unlock page and remove the count from histogram
locker.Unlock(reinterpret_cast<void*>(page), page_size);
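Aside (not part of the diff): LockRange and UnlockRange reference-count at page granularity, so two buffers sharing a page keep it locked until both have been released. A sketch of that behavior, assuming the header's include path and plugging a hypothetical logging Locker into the template:

    #include <cstdio>
    #include "allocators.h" // assumed include path for LockedPageManagerBase

    // Hypothetical locker that only logs; a real one would mlock/munlock.
    class LoggingLocker
    {
    public:
        bool Lock(const void* addr, size_t len)   { std::printf("lock   %p\n", addr); return true; }
        bool Unlock(const void* addr, size_t len) { std::printf("unlock %p\n", addr); return true; }
    };

    int main()
    {
        LockedPageManagerBase<LoggingLocker> lpm(4096);
        char buf[64]; // small, so it usually sits within a single page
        lpm.LockRange(buf, 32);        // first use of the page: locker.Lock() fires
        lpm.LockRange(buf + 16, 32);   // same page: only the histogram counter grows
        lpm.UnlockRange(buf, 32);      // counter drops to 1, page stays locked
        lpm.UnlockRange(buf + 16, 32); // counter hits 0: locker.Unlock() fires
        return 0;
    }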
@@ -101,7 +100,7 @@ private:
boost::mutex mutex;
size_t page_size, page_mask;
// map of page base address to lock count
- typedef std::map<size_t,int> Histogram;
+ typedef std::map<size_t, int> Histogram;
Histogram histogram;
};
@@ -116,11 +115,11 @@ public:
/** Lock memory pages.
* addr and len must be a multiple of the system page size
*/
- bool Lock(const void *addr, size_t len);
+ bool Lock(const void* addr, size_t len);
/** Unlock memory pages.
* addr and len must be a multiple of the system page size
*/
- bool Unlock(const void *addr, size_t len);
+ bool Unlock(const void* addr, size_t len);
};
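Aside (not part of the diff): the definitions of these two methods live in allocators.cpp, not in this header. On a POSIX system the natural implementation wraps mlock/munlock, as in the sketch below; the real source also covers Windows via VirtualLock/VirtualUnlock:

    #include <sys/mman.h> // POSIX mlock/munlock

    bool MemoryPageLocker::Lock(const void* addr, size_t len)
    {
        return mlock(addr, len) == 0; // pin pages so they cannot be swapped out
    }

    bool MemoryPageLocker::Unlock(const void* addr, size_t len)
    {
        return munlock(addr, len) == 0; // allow paging again
    }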
/**
@@ -134,10 +133,10 @@ public:
* secure_allocator are created. So instead of having LockedPageManager also be
* static-initialized, it is created on demand.
*/
-class LockedPageManager: public LockedPageManagerBase<MemoryPageLocker>
+class LockedPageManager : public LockedPageManagerBase<MemoryPageLocker>
{
public:
- static LockedPageManager& Instance()
+ static LockedPageManager& Instance()
{
boost::call_once(LockedPageManager::CreateInstance, LockedPageManager::init_flag);
return *LockedPageManager::_instance;
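Aside (not part of the diff): boost::call_once guarantees CreateInstance runs exactly once even if several threads hit Instance() concurrently during static initialization. The general shape of such an on-demand singleton, as a self-contained sketch (Demo is a placeholder name, not from the source):

    #include <boost/thread/once.hpp>

    class Demo
    {
    public:
        static Demo& Instance()
        {
            boost::call_once(CreateInstance, init_flag); // runs at most once
            return *_instance;
        }

    private:
        static void CreateInstance()
        {
            // A function-local static is built on first use and torn down only
            // after later-constructed objects, dodging init-order problems.
            static Demo instance;
            _instance = &instance;
        }
        static Demo* _instance;
        static boost::once_flag init_flag;
    };

    Demo* Demo::_instance = 0;
    boost::once_flag Demo::init_flag = BOOST_ONCE_INIT;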
@@ -165,11 +164,15 @@ private:
// Functions for directly locking/unlocking memory objects.
// Intended for non-dynamically allocated structures.
//
-template<typename T> void LockObject(const T &t) {
+template <typename T>
+void LockObject(const T& t)
+{
LockedPageManager::Instance().LockRange((void*)(&t), sizeof(T));
}
-template<typename T> void UnlockObject(const T &t) {
+template <typename T>
+void UnlockObject(const T& t)
+{
OPENSSL_cleanse((void*)(&t), sizeof(T));
LockedPageManager::Instance().UnlockRange((void*)(&t), sizeof(T));
}
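Aside (not part of the diff): LockObject/UnlockObject are the convenience entry points for secrets that live on the stack or in globals rather than inside a container. Usage sketch (KeyData and UseKey are hypothetical names):

    #include "allocators.h" // assumed include path

    struct KeyData {
        unsigned char bytes[32]; // illustrative secret material
    };

    void UseKey()
    {
        KeyData key;
        LockObject(key);   // pin the pages holding key into physical RAM
        // ... fill and use the secret ...
        UnlockObject(key); // OPENSSL_cleanse the bytes, then drop the page locks
    }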
@@ -178,13 +181,12 @@ template<typename T> void UnlockObject(const T &t) {
// Allocator that locks its contents from being paged
// out of memory and clears its contents before deletion.
//
-template<typename T>
-struct secure_allocator : public std::allocator<T>
-{
+template <typename T>
+struct secure_allocator : public std::allocator<T> {
// MSVC8 default copy constructor is broken
typedef std::allocator<T> base;
typedef typename base::size_type size_type;
- typedef typename base::difference_type difference_type;
+ typedef typename base::difference_type difference_type;
typedef typename base::pointer pointer;
typedef typename base::const_pointer const_pointer;
typedef typename base::reference reference;
@@ -193,14 +195,18 @@ struct secure_allocator : public std::allocator<T>
secure_allocator() throw() {}
secure_allocator(const secure_allocator& a) throw() : base(a) {}
template <typename U>
- secure_allocator(const secure_allocator<U>& a) throw() : base(a) {}
+ secure_allocator(const secure_allocator<U>& a) throw() : base(a)
+ {
+ }
~secure_allocator() throw() {}
- template<typename _Other> struct rebind
- { typedef secure_allocator<_Other> other; };
+ template <typename _Other>
+ struct rebind {
+ typedef secure_allocator<_Other> other;
+ };
- T* allocate(std::size_t n, const void *hint = 0)
+ T* allocate(std::size_t n, const void* hint = 0)
{
- T *p;
+ T* p;
p = std::allocator<T>::allocate(n, hint);
if (p != NULL)
LockedPageManager::Instance().LockRange(p, sizeof(T) * n);
@@ -209,8 +215,7 @@ struct secure_allocator : public std::allocator<T>
void deallocate(T* p, std::size_t n)
{
- if (p != NULL)
- {
+ if (p != NULL) {
OPENSSL_cleanse(p, sizeof(T) * n);
LockedPageManager::Instance().UnlockRange(p, sizeof(T) * n);
}
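Aside (not part of the diff): any standard container can opt into this allocator through its allocator template parameter; this is the same pattern behind the codebase's SecureString typedef. Usage sketch (SecureBytes and Example are illustrative names):

    #include <string>
    #include <vector>
    #include "allocators.h" // assumed include path

    // Strings/buffers whose backing pages stay locked in RAM while alive
    // and are wiped with OPENSSL_cleanse before being freed.
    typedef std::basic_string<char, std::char_traits<char>, secure_allocator<char> > SecureString;
    typedef std::vector<unsigned char, secure_allocator<unsigned char> > SecureBytes;

    void Example()
    {
        SecureString passphrase("hunter2"); // contents never hit swap
        SecureBytes seed(32);               // zeroed out on deallocation
    }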
@@ -222,13 +227,12 @@ struct secure_allocator : public std::allocator<T>
//
// Allocator that clears its contents before deletion.
//
-template<typename T>
-struct zero_after_free_allocator : public std::allocator<T>
-{
+template <typename T>
+struct zero_after_free_allocator : public std::allocator<T> {
// MSVC8 default copy constructor is broken
typedef std::allocator<T> base;
typedef typename base::size_type size_type;
- typedef typename base::difference_type difference_type;
+ typedef typename base::difference_type difference_type;
typedef typename base::pointer pointer;
typedef typename base::const_pointer const_pointer;
typedef typename base::reference reference;
@@ -237,10 +241,14 @@ struct zero_after_free_allocator : public std::allocator<T>
zero_after_free_allocator() throw() {}
zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) {}
template <typename U>
- zero_after_free_allocator(const zero_after_free_allocator<U>& a) throw() : base(a) {}
+ zero_after_free_allocator(const zero_after_free_allocator<U>& a) throw() : base(a)
+ {
+ }
~zero_after_free_allocator() throw() {}
- template<typename _Other> struct rebind
- { typedef zero_after_free_allocator<_Other> other; };
+ template <typename _Other>
+ struct rebind {
+ typedef zero_after_free_allocator<_Other> other;
+ };
void deallocate(T* p, std::size_t n)
{
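Aside (not part of the diff, which the page cuts off above): zero_after_free_allocator skips the page locking but still cleanses memory on release, which suits large, frequently reallocated buffers such as serialized data, where locking every page would be too costly. Usage sketch (WipedBuffer is a hypothetical name):

    #include <vector>
    #include "allocators.h" // assumed include path

    // A byte buffer whose memory is zeroed before being returned to the heap.
    typedef std::vector<char, zero_after_free_allocator<char> > WipedBuffer;

    void Example()
    {
        WipedBuffer buf(512);
        // ... fill with sensitive serialized data ...
    } // destructor: OPENSSL_cleanse runs, then the usual deallocate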