From 20e01b1a03819d843a860284033b48a5e3b65ff7 Mon Sep 17 00:00:00 2001
From: Pieter Wuille
Date: Fri, 19 Sep 2014 19:21:46 +0200
Subject: Apply clang-format on some infrequently-updated files

---
 src/allocators.h | 90 ++++++++++++++++++++++++++++++--------------------------
 1 file changed, 49 insertions(+), 41 deletions(-)

(limited to 'src/allocators.h')

diff --git a/src/allocators.h b/src/allocators.h
index 65a7d08987..6b69e7ae69 100644
--- a/src/allocators.h
+++ b/src/allocators.h
@@ -26,14 +26,14 @@
  * small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
  * something like an interval tree would be the preferred data structure.
  */
-template <class Locker> class LockedPageManagerBase
+template <class Locker>
+class LockedPageManagerBase
 {
 public:
-    LockedPageManagerBase(size_t page_size):
-        page_size(page_size)
+    LockedPageManagerBase(size_t page_size) : page_size(page_size)
     {
         // Determine bitmask for extracting page from address
-        assert(!(page_size & (page_size-1))); // size must be power of two
+        assert(!(page_size & (page_size - 1))); // size must be power of two
         page_mask = ~(page_size - 1);
     }
 
@@ -44,22 +44,21 @@ public:
 
 
     // For all pages in affected range, increase lock count
-    void LockRange(void *p, size_t size)
+    void LockRange(void* p, size_t size)
     {
         boost::mutex::scoped_lock lock(mutex);
-        if(!size) return;
+        if (!size)
+            return;
         const size_t base_addr = reinterpret_cast<size_t>(p);
         const size_t start_page = base_addr & page_mask;
         const size_t end_page = (base_addr + size - 1) & page_mask;
-        for(size_t page = start_page; page <= end_page; page += page_size)
-        {
+        for (size_t page = start_page; page <= end_page; page += page_size) {
             Histogram::iterator it = histogram.find(page);
-            if(it == histogram.end()) // Newly locked page
+            if (it == histogram.end()) // Newly locked page
             {
                 locker.Lock(reinterpret_cast<void*>(page), page_size);
                 histogram.insert(std::make_pair(page, 1));
-            }
-            else // Page was already locked; increase counter
+            } else // Page was already locked; increase counter
             {
                 it->second += 1;
             }
@@ -67,20 +66,20 @@ public:
     }
 
     // For all pages in affected range, decrease lock count
-    void UnlockRange(void *p, size_t size)
+    void UnlockRange(void* p, size_t size)
    {
         boost::mutex::scoped_lock lock(mutex);
-        if(!size) return;
+        if (!size)
+            return;
         const size_t base_addr = reinterpret_cast<size_t>(p);
         const size_t start_page = base_addr & page_mask;
         const size_t end_page = (base_addr + size - 1) & page_mask;
-        for(size_t page = start_page; page <= end_page; page += page_size)
-        {
+        for (size_t page = start_page; page <= end_page; page += page_size) {
             Histogram::iterator it = histogram.find(page);
             assert(it != histogram.end()); // Cannot unlock an area that was not locked
             // Decrease counter for page, when it is zero, the page will be unlocked
             it->second -= 1;
-            if(it->second == 0) // Nothing on the page anymore that keeps it locked
+            if (it->second == 0) // Nothing on the page anymore that keeps it locked
             {
                 // Unlock page and remove the count from histogram
                 locker.Unlock(reinterpret_cast<void*>(page), page_size);
@@ -101,7 +100,7 @@ private:
     boost::mutex mutex;
     size_t page_size, page_mask;
     // map of page base address to lock count
-    typedef std::map<size_t,int> Histogram;
+    typedef std::map<size_t, int> Histogram;
     Histogram histogram;
 };
 
@@ -116,11 +115,11 @@ public:
     /** Lock memory pages.
      * addr and len must be a multiple of the system page size
      */
-    bool Lock(const void *addr, size_t len);
+    bool Lock(const void* addr, size_t len);
     /** Unlock memory pages.
      * addr and len must be a multiple of the system page size
      */
-    bool Unlock(const void *addr, size_t len);
+    bool Unlock(const void* addr, size_t len);
 };
 
 /**
@@ -134,10 +133,10 @@
  * secure_allocator are created. So instead of having LockedPageManager also be
  * static-initialized, it is created on demand.
  */
-class LockedPageManager: public LockedPageManagerBase<MemoryPageLocker>
+class LockedPageManager : public LockedPageManagerBase<MemoryPageLocker>
 {
 public:
-    static LockedPageManager& Instance()
+    static LockedPageManager& Instance()
     {
         boost::call_once(LockedPageManager::CreateInstance, LockedPageManager::init_flag);
         return *LockedPageManager::_instance;
     }
@@ -165,11 +164,15 @@ private:
 // Functions for directly locking/unlocking memory objects.
 // Intended for non-dynamically allocated structures.
 //
-template<typename T> void LockObject(const T &t) {
+template <typename T>
+void LockObject(const T& t)
+{
     LockedPageManager::Instance().LockRange((void*)(&t), sizeof(T));
 }
 
-template<typename T> void UnlockObject(const T &t) {
+template <typename T>
+void UnlockObject(const T& t)
+{
     OPENSSL_cleanse((void*)(&t), sizeof(T));
     LockedPageManager::Instance().UnlockRange((void*)(&t), sizeof(T));
 }
@@ -178,13 +181,12 @@ template<typename T> void UnlockObject(const T &t) {
 //
 // Allocator that locks its contents from being paged
 // out of memory and clears its contents before deletion.
 //
-template<typename T>
-struct secure_allocator : public std::allocator<T>
-{
+template <typename T>
+struct secure_allocator : public std::allocator<T> {
     // MSVC8 default copy constructor is broken
     typedef std::allocator<T> base;
     typedef typename base::size_type size_type;
-    typedef typename base::difference_type  difference_type;
+    typedef typename base::difference_type difference_type;
     typedef typename base::pointer pointer;
     typedef typename base::const_pointer const_pointer;
     typedef typename base::reference reference;
@@ -193,14 +195,18 @@ struct secure_allocator : public std::allocator<T>
     secure_allocator() throw() {}
     secure_allocator(const secure_allocator& a) throw() : base(a) {}
     template <typename U>
-    secure_allocator(const secure_allocator<U>& a) throw() : base(a) {}
+    secure_allocator(const secure_allocator<U>& a) throw() : base(a)
+    {
+    }
     ~secure_allocator() throw() {}
-    template<typename _Other> struct rebind
-    { typedef secure_allocator<_Other> other; };
+    template <typename _Other>
+    struct rebind {
+        typedef secure_allocator<_Other> other;
+    };
 
-    T* allocate(std::size_t n, const void *hint = 0)
+    T* allocate(std::size_t n, const void* hint = 0)
     {
-        T *p;
+        T* p;
         p = std::allocator<T>::allocate(n, hint);
         if (p != NULL)
             LockedPageManager::Instance().LockRange(p, sizeof(T) * n);
@@ -209,8 +215,7 @@ struct secure_allocator : public std::allocator<T>
 
     void deallocate(T* p, std::size_t n)
     {
-        if (p != NULL)
-        {
+        if (p != NULL) {
             OPENSSL_cleanse(p, sizeof(T) * n);
             LockedPageManager::Instance().UnlockRange(p, sizeof(T) * n);
         }
@@ -222,13 +227,12 @@ struct secure_allocator : public std::allocator<T>
 //
 // Allocator that clears its contents before deletion.
 //
-template<typename T>
-struct zero_after_free_allocator : public std::allocator<T>
-{
+template <typename T>
+struct zero_after_free_allocator : public std::allocator<T> {
     // MSVC8 default copy constructor is broken
     typedef std::allocator<T> base;
     typedef typename base::size_type size_type;
-    typedef typename base::difference_type  difference_type;
+    typedef typename base::difference_type difference_type;
     typedef typename base::pointer pointer;
     typedef typename base::const_pointer const_pointer;
     typedef typename base::reference reference;
@@ -237,10 +241,14 @@ struct zero_after_free_allocator : public std::allocator<T>
     zero_after_free_allocator() throw() {}
     zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) {}
     template <typename U>
-    zero_after_free_allocator(const zero_after_free_allocator<U>& a) throw() : base(a) {}
+    zero_after_free_allocator(const zero_after_free_allocator<U>& a) throw() : base(a)
+    {
+    }
     ~zero_after_free_allocator() throw() {}
-    template<typename _Other> struct rebind
-    { typedef zero_after_free_allocator<_Other> other; };
+    template <typename _Other>
+    struct rebind {
+        typedef zero_after_free_allocator<_Other> other;
+    };
 
     void deallocate(T* p, std::size_t n)
     {
-- 
cgit v1.2.3
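Note (not part of the patch): the class being reformatted above, LockedPageManagerBase, keeps an mlock() reference count per memory page, as its comment describes. A locked range is rounded out to page boundaries, a page is locked the first time anything on it is locked, and it is unlocked only when its count drops back to zero. The sketch below is a minimal standalone illustration of that counting scheme under stated assumptions: the names PageLockCounter and StubLocker are invented, the stub locker replaces mlock()/munlock(), and the boost::mutex that guards the real histogram is omitted. It compiles on its own, without Boost or OpenSSL.

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <map>

// Stand-in for the OS-dependent locker (MemoryPageLocker in the real header).
struct StubLocker {
    int locked_pages;
    StubLocker() : locked_pages(0) {}
    void Lock(void*, size_t) { ++locked_pages; }
    void Unlock(void*, size_t) { --locked_pages; }
};

// Hypothetical re-creation of the per-page lock counting done by LockedPageManagerBase.
class PageLockCounter
{
public:
    explicit PageLockCounter(size_t page_size) : page_size(page_size)
    {
        assert(!(page_size & (page_size - 1))); // page size must be a power of two
        page_mask = ~(page_size - 1);
    }

    // For all pages in the range, increase the lock count; lock a page on first use.
    void LockRange(void* p, size_t size)
    {
        if (!size)
            return;
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for (size_t page = start_page; page <= end_page; page += page_size) {
            if (histogram[page]++ == 0) // newly locked page
                locker.Lock(reinterpret_cast<void*>(page), page_size);
        }
    }

    // For all pages in the range, decrease the lock count; unlock when it reaches zero.
    void UnlockRange(void* p, size_t size)
    {
        if (!size)
            return;
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for (size_t page = start_page; page <= end_page; page += page_size) {
            std::map<size_t, int>::iterator it = histogram.find(page);
            assert(it != histogram.end()); // cannot unlock an area that was not locked
            if (--it->second == 0) { // nothing on the page keeps it locked anymore
                locker.Unlock(reinterpret_cast<void*>(page), page_size);
                histogram.erase(it);
            }
        }
    }

    int GetLockedPageCount() const { return locker.locked_pages; }

private:
    StubLocker locker;
    size_t page_size, page_mask;
    std::map<size_t, int> histogram; // page base address -> lock count
};

int main()
{
    PageLockCounter manager(4096);
    char secret[256];
    manager.LockRange(secret, sizeof(secret));   // page(s) behind 'secret' get locked
    manager.LockRange(secret + 16, 64);          // same page: count rises, no extra lock
    manager.UnlockRange(secret + 16, 64);        // count drops, page stays locked
    manager.UnlockRange(secret, sizeof(secret)); // last user gone, page unlocked
    std::printf("locked pages: %d\n", manager.GetLockedPageCount()); // prints 0
    return 0;
}

In the real header this counter sits behind a boost::mutex and the on-demand LockedPageManager singleton, and secure_allocator drives it from allocate() and deallocate(), combining page locking with OPENSSL_cleanse() of the memory before it is released.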