Diffstat (limited to 'src/support')
-rw-r--r--   src/support/allocators/secure.h  |  14
-rw-r--r--   src/support/cleanse.h             |   1
-rw-r--r--   src/support/events.h              |  56
-rw-r--r--   src/support/lockedpool.cpp        | 385
-rw-r--r--   src/support/lockedpool.h          | 231
-rw-r--r--   src/support/pagelocker.cpp        |  70
-rw-r--r--   src/support/pagelocker.h          | 177
7 files changed, 678 insertions(+), 256 deletions(-)
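Reviewer's note: the headline change, visible in the first file of the diff below, is that secure_allocator now draws its memory from the pooled LockedPool allocator instead of page-locking around a std::allocator allocation. A minimal, hedged sketch of how existing callers exercise the new path; it assumes the SecureString typedef that secure.h provides further down (not shown in the hunk), and nothing here ships with the commit:

```cpp
// Hedged sketch, not part of the diff: how callers exercise the new allocator.
// SecureString is assumed to be the std::basic_string<char, ..., secure_allocator<char>>
// typedef already present in secure.h; LockedPoolManager is introduced by this change.
#include "support/allocators/secure.h"

#include <vector>

int main()
{
    // Both containers now obtain their backing memory from the process-wide
    // LockedPool instead of locking individual pages after the fact.
    SecureString passphrase("hunter2");
    std::vector<unsigned char, secure_allocator<unsigned char> > key(32, 0x00);

    // On destruction the memory is cleansed and handed back to the pool.
    return (key.size() == 32 && !passphrase.empty()) ? 0 : 1;
}
```

Note that LockedPoolManager::alloc() returns a null pointer when the pool cannot satisfy a request, and the new secure_allocator::allocate() passes that through rather than throwing.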
diff --git a/src/support/allocators/secure.h b/src/support/allocators/secure.h
index 1ec40fe830..9daba86ef3 100644
--- a/src/support/allocators/secure.h
+++ b/src/support/allocators/secure.h
@@ -1,12 +1,13 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2015 The Bitcoin Core developers
+// Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_SUPPORT_ALLOCATORS_SECURE_H
 #define BITCOIN_SUPPORT_ALLOCATORS_SECURE_H
 
-#include "support/pagelocker.h"
+#include "support/lockedpool.h"
+#include "support/cleanse.h"
 
 #include <string>
 
@@ -39,20 +40,15 @@ struct secure_allocator : public std::allocator<T> {
     T* allocate(std::size_t n, const void* hint = 0)
     {
-        T* p;
-        p = std::allocator<T>::allocate(n, hint);
-        if (p != NULL)
-            LockedPageManager::Instance().LockRange(p, sizeof(T) * n);
-        return p;
+        return static_cast<T*>(LockedPoolManager::Instance().alloc(sizeof(T) * n));
     }
 
     void deallocate(T* p, std::size_t n)
     {
         if (p != NULL) {
             memory_cleanse(p, sizeof(T) * n);
-            LockedPageManager::Instance().UnlockRange(p, sizeof(T) * n);
         }
-        std::allocator<T>::deallocate(p, n);
+        LockedPoolManager::Instance().free(p);
     }
 };
diff --git a/src/support/cleanse.h b/src/support/cleanse.h
index 3e02aa8fd1..f020216c73 100644
--- a/src/support/cleanse.h
+++ b/src/support/cleanse.h
@@ -8,6 +8,7 @@
 
 #include <stdlib.h>
 
+// Attempt to overwrite data in the specified memory span.
 void memory_cleanse(void *ptr, size_t len);
 
 #endif // BITCOIN_SUPPORT_CLEANSE_H
diff --git a/src/support/events.h b/src/support/events.h
new file mode 100644
index 0000000000..4f2f3cf9ef
--- /dev/null
+++ b/src/support/events.h
@@ -0,0 +1,56 @@
+// Copyright (c) 2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_SUPPORT_EVENTS_H
+#define BITCOIN_SUPPORT_EVENTS_H
+
+#include <ios>
+#include <memory>
+
+#include <event2/event.h>
+#include <event2/http.h>
+
+#define MAKE_RAII(type) \
+/* deleter */\
+struct type##_deleter {\
+    void operator()(struct type* ob) {\
+        type##_free(ob);\
+    }\
+};\
+/* unique ptr typedef */\
+typedef std::unique_ptr<struct type, type##_deleter> raii_##type
+
+MAKE_RAII(event_base);
+MAKE_RAII(event);
+MAKE_RAII(evhttp);
+MAKE_RAII(evhttp_request);
+MAKE_RAII(evhttp_connection);
+
+raii_event_base obtain_event_base() {
+    auto result = raii_event_base(event_base_new());
+    if (!result.get())
+        throw std::runtime_error("cannot create event_base");
+    return result;
+}
+
+raii_event obtain_event(struct event_base* base, evutil_socket_t s, short events, event_callback_fn cb, void* arg) {
+    return raii_event(event_new(base, s, events, cb, arg));
+}
+
+raii_evhttp obtain_evhttp(struct event_base* base) {
+    return raii_evhttp(evhttp_new(base));
+}
+
+raii_evhttp_request obtain_evhttp_request(void(*cb)(struct evhttp_request *, void *), void *arg) {
+    return raii_evhttp_request(evhttp_request_new(cb, arg));
+}
+
+raii_evhttp_connection obtain_evhttp_connection_base(struct event_base* base, std::string host, uint16_t port) {
+    auto result = raii_evhttp_connection(evhttp_connection_base_new(base, NULL, host.c_str(), port));
+    if (!result.get())
+        throw std::runtime_error("create connection failed");
+    return result;
+}
+
+#endif // BITCOIN_SUPPORT_EVENTS_H
diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp
new file mode 100644
index 0000000000..98c1581093
--- /dev/null
+++ b/src/support/lockedpool.cpp
@@ -0,0 +1,385 @@
+// Copyright (c) 2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+ +#include "support/lockedpool.h" +#include "support/cleanse.h" + +#if defined(HAVE_CONFIG_H) +#include "config/bitcoin-config.h" +#endif + +#ifdef WIN32 +#ifdef _WIN32_WINNT +#undef _WIN32_WINNT +#endif +#define _WIN32_WINNT 0x0501 +#define WIN32_LEAN_AND_MEAN 1 +#ifndef NOMINMAX +#define NOMINMAX +#endif +#include <windows.h> +#else +#include <sys/mman.h> // for mmap +#include <sys/resource.h> // for getrlimit +#include <limits.h> // for PAGESIZE +#include <unistd.h> // for sysconf +#endif + +#include <algorithm> + +LockedPoolManager* LockedPoolManager::_instance = NULL; +std::once_flag LockedPoolManager::init_flag; + +/*******************************************************************************/ +// Utilities +// +/** Align up to power of 2 */ +static inline size_t align_up(size_t x, size_t align) +{ + return (x + align - 1) & ~(align - 1); +} + +/*******************************************************************************/ +// Implementation: Arena + +Arena::Arena(void *base_in, size_t size_in, size_t alignment_in): + base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in) +{ + // Start with one free chunk that covers the entire arena + chunks_free.emplace(base, size_in); +} + +Arena::~Arena() +{ +} + +void* Arena::alloc(size_t size) +{ + // Round to next multiple of alignment + size = align_up(size, alignment); + + // Don't handle zero-sized chunks + if (size == 0) + return nullptr; + + // Pick a large enough free-chunk + auto it = std::find_if(chunks_free.begin(), chunks_free.end(), + [=](const std::map<char*, size_t>::value_type& chunk){ return chunk.second >= size; }); + if (it == chunks_free.end()) + return nullptr; + + // Create the used-chunk, taking its space from the end of the free-chunk + auto alloced = chunks_used.emplace(it->first + it->second - size, size).first; + if (!(it->second -= size)) + chunks_free.erase(it); + return reinterpret_cast<void*>(alloced->first); +} + +/* extend the Iterator if other begins at its end */ +template <class Iterator, class Pair> bool extend(Iterator it, const Pair& other) { + if (it->first + it->second == other.first) { + it->second += other.second; + return true; + } + return false; +} + +void Arena::free(void *ptr) +{ + // Freeing the NULL pointer is OK. + if (ptr == nullptr) { + return; + } + + // Remove chunk from used map + auto i = chunks_used.find(static_cast<char*>(ptr)); + if (i == chunks_used.end()) { + throw std::runtime_error("Arena: invalid or double free"); + } + auto freed = *i; + chunks_used.erase(i); + + // Add space to free map, coalescing contiguous chunks + auto next = chunks_free.upper_bound(freed.first); + auto prev = (next == chunks_free.begin()) ? 
chunks_free.end() : std::prev(next); + if (prev == chunks_free.end() || !extend(prev, freed)) + prev = chunks_free.emplace_hint(next, freed); + if (next != chunks_free.end() && extend(prev, *next)) + chunks_free.erase(next); +} + +Arena::Stats Arena::stats() const +{ + Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() }; + for (const auto& chunk: chunks_used) + r.used += chunk.second; + for (const auto& chunk: chunks_free) + r.free += chunk.second; + r.total = r.used + r.free; + return r; +} + +#ifdef ARENA_DEBUG +void printchunk(char* base, size_t sz, bool used) { + std::cout << + "0x" << std::hex << std::setw(16) << std::setfill('0') << base << + " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz << + " 0x" << used << std::endl; +} +void Arena::walk() const +{ + for (const auto& chunk: chunks_used) + printchunk(chunk.first, chunk.second, true); + std::cout << std::endl; + for (const auto& chunk: chunks_free) + printchunk(chunk.first, chunk.second, false); + std::cout << std::endl; +} +#endif + +/*******************************************************************************/ +// Implementation: Win32LockedPageAllocator + +#ifdef WIN32 +/** LockedPageAllocator specialized for Windows. + */ +class Win32LockedPageAllocator: public LockedPageAllocator +{ +public: + Win32LockedPageAllocator(); + void* AllocateLocked(size_t len, bool *lockingSuccess); + void FreeLocked(void* addr, size_t len); + size_t GetLimit(); +private: + size_t page_size; +}; + +Win32LockedPageAllocator::Win32LockedPageAllocator() +{ + // Determine system page size in bytes + SYSTEM_INFO sSysInfo; + GetSystemInfo(&sSysInfo); + page_size = sSysInfo.dwPageSize; +} +void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess) +{ + len = align_up(len, page_size); + void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + if (addr) { + // VirtualLock is used to attempt to keep keying material out of swap. Note + // that it does not provide this as a guarantee, but, in practice, memory + // that has been VirtualLock'd almost never gets written to the pagefile + // except in rare circumstances where memory is extremely low. + *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0; + } + return addr; +} +void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len) +{ + len = align_up(len, page_size); + memory_cleanse(addr, len); + VirtualUnlock(const_cast<void*>(addr), len); +} + +size_t Win32LockedPageAllocator::GetLimit() +{ + // TODO is there a limit on windows, how to get it? + return std::numeric_limits<size_t>::max(); +} +#endif + +/*******************************************************************************/ +// Implementation: PosixLockedPageAllocator + +#ifndef WIN32 +/** LockedPageAllocator specialized for OSes that don't try to be + * special snowflakes. 
+ */ +class PosixLockedPageAllocator: public LockedPageAllocator +{ +public: + PosixLockedPageAllocator(); + void* AllocateLocked(size_t len, bool *lockingSuccess); + void FreeLocked(void* addr, size_t len); + size_t GetLimit(); +private: + size_t page_size; +}; + +PosixLockedPageAllocator::PosixLockedPageAllocator() +{ + // Determine system page size in bytes +#if defined(PAGESIZE) // defined in limits.h + page_size = PAGESIZE; +#else // assume some POSIX OS + page_size = sysconf(_SC_PAGESIZE); +#endif +} + +// Some systems (at least OS X) do not define MAP_ANONYMOUS yet and define +// MAP_ANON which is deprecated +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + +void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess) +{ + void *addr; + len = align_up(len, page_size); + addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (addr) { + *lockingSuccess = mlock(addr, len) == 0; + } + return addr; +} +void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len) +{ + len = align_up(len, page_size); + memory_cleanse(addr, len); + munlock(addr, len); + munmap(addr, len); +} +size_t PosixLockedPageAllocator::GetLimit() +{ +#ifdef RLIMIT_MEMLOCK + struct rlimit rlim; + if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) { + if (rlim.rlim_cur != RLIM_INFINITY) { + return rlim.rlim_cur; + } + } +#endif + return std::numeric_limits<size_t>::max(); +} +#endif + +/*******************************************************************************/ +// Implementation: LockedPool + +LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in): + allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0) +{ +} + +LockedPool::~LockedPool() +{ +} +void* LockedPool::alloc(size_t size) +{ + std::lock_guard<std::mutex> lock(mutex); + + // Don't handle impossible sizes + if (size == 0 || size > ARENA_SIZE) + return nullptr; + + // Try allocating from each current arena + for (auto &arena: arenas) { + void *addr = arena.alloc(size); + if (addr) { + return addr; + } + } + // If that fails, create a new one + if (new_arena(ARENA_SIZE, ARENA_ALIGN)) { + return arenas.back().alloc(size); + } + return nullptr; +} + +void LockedPool::free(void *ptr) +{ + std::lock_guard<std::mutex> lock(mutex); + // TODO we can do better than this linear search by keeping a map of arena + // extents to arena, and looking up the address. + for (auto &arena: arenas) { + if (arena.addressInArena(ptr)) { + arena.free(ptr); + return; + } + } + throw std::runtime_error("LockedPool: invalid address not pointing to any arena"); +} + +LockedPool::Stats LockedPool::stats() const +{ + std::lock_guard<std::mutex> lock(mutex); + LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0}; + for (const auto &arena: arenas) { + Arena::Stats i = arena.stats(); + r.used += i.used; + r.free += i.free; + r.total += i.total; + r.chunks_used += i.chunks_used; + r.chunks_free += i.chunks_free; + } + return r; +} + +bool LockedPool::new_arena(size_t size, size_t align) +{ + bool locked; + // If this is the first arena, handle this specially: Cap the upper size + // by the process limit. This makes sure that the first arena will at least + // be locked. An exception to this is if the process limit is 0: + // in this case no memory can be locked at all so we'll skip past this logic. 
+ if (arenas.empty()) { + size_t limit = allocator->GetLimit(); + if (limit > 0) { + size = std::min(size, limit); + } + } + void *addr = allocator->AllocateLocked(size, &locked); + if (!addr) { + return false; + } + if (locked) { + cumulative_bytes_locked += size; + } else if (lf_cb) { // Call the locking-failed callback if locking failed + if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed. + allocator->FreeLocked(addr, size); + return false; + } + } + arenas.emplace_back(allocator.get(), addr, size, align); + return true; +} + +LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in): + Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in) +{ +} +LockedPool::LockedPageArena::~LockedPageArena() +{ + allocator->FreeLocked(base, size); +} + +/*******************************************************************************/ +// Implementation: LockedPoolManager +// +LockedPoolManager::LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator_in): + LockedPool(std::move(allocator_in), &LockedPoolManager::LockingFailed) +{ +} + +bool LockedPoolManager::LockingFailed() +{ + // TODO: log something but how? without including util.h + return true; +} + +void LockedPoolManager::CreateInstance() +{ + // Using a local static instance guarantees that the object is initialized + // when it's first needed and also deinitialized after all objects that use + // it are done with it. I can think of one unlikely scenario where we may + // have a static deinitialization order/problem, but the check in + // LockedPoolManagerBase's destructor helps us detect if that ever happens. +#ifdef WIN32 + std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator()); +#else + std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator()); +#endif + static LockedPoolManager instance(std::move(allocator)); + LockedPoolManager::_instance = &instance; +} diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h new file mode 100644 index 0000000000..f5212bc266 --- /dev/null +++ b/src/support/lockedpool.h @@ -0,0 +1,231 @@ +// Copyright (c) 2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H +#define BITCOIN_SUPPORT_LOCKEDPOOL_H + +#include <stdint.h> +#include <list> +#include <map> +#include <mutex> +#include <memory> + +/** + * OS-dependent allocation and deallocation of locked/pinned memory pages. + * Abstract base class. + */ +class LockedPageAllocator +{ +public: + virtual ~LockedPageAllocator() {} + /** Allocate and lock memory pages. + * If len is not a multiple of the system page size, it is rounded up. + * Returns 0 in case of allocation failure. + * + * If locking the memory pages could not be accomplished it will still + * return the memory, however the lockingSuccess flag will be false. + * lockingSuccess is undefined if the allocation fails. + */ + virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0; + + /** Unlock and free memory pages. + * Clear the memory before unlocking. + */ + virtual void FreeLocked(void* addr, size_t len) = 0; + + /** Get the total limit on the amount of memory that may be locked by this + * process, in bytes. Return size_t max if there is no limit or the limit + * is unknown. 
Return 0 if no memory can be locked at all. + */ + virtual size_t GetLimit() = 0; +}; + +/* An arena manages a contiguous region of memory by dividing it into + * chunks. + */ +class Arena +{ +public: + Arena(void *base, size_t size, size_t alignment); + virtual ~Arena(); + + /** Memory statistics. */ + struct Stats + { + size_t used; + size_t free; + size_t total; + size_t chunks_used; + size_t chunks_free; + }; + + /** Allocate size bytes from this arena. + * Returns pointer on success, or 0 if memory is full or + * the application tried to allocate 0 bytes. + */ + void* alloc(size_t size); + + /** Free a previously allocated chunk of memory. + * Freeing the zero pointer has no effect. + * Raises std::runtime_error in case of error. + */ + void free(void *ptr); + + /** Get arena usage statistics */ + Stats stats() const; + +#ifdef ARENA_DEBUG + void walk() const; +#endif + + /** Return whether a pointer points inside this arena. + * This returns base <= ptr < (base+size) so only use it for (inclusive) + * chunk starting addresses. + */ + bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; } +private: + Arena(const Arena& other) = delete; // non construction-copyable + Arena& operator=(const Arena&) = delete; // non copyable + + /** Map of chunk address to chunk information. This class makes use of the + * sorted order to merge previous and next chunks during deallocation. + */ + std::map<char*, size_t> chunks_free; + std::map<char*, size_t> chunks_used; + /** Base address of arena */ + char* base; + /** End address of arena */ + char* end; + /** Minimum chunk alignment */ + size_t alignment; +}; + +/** Pool for locked memory chunks. + * + * To avoid sensitive key data from being swapped to disk, the memory in this pool + * is locked/pinned. + * + * An arena manages a contiguous region of memory. The pool starts out with one arena + * but can grow to multiple arenas if the need arises. + * + * Unlike a normal C heap, the administrative structures are separate from the managed + * memory. This has been done as the sizes and bases of objects are not in themselves sensitive + * information, as to conserve precious locked memory. In some operating systems + * the amount of memory that can be locked is small. + */ +class LockedPool +{ +public: + /** Size of one arena of locked memory. This is a compromise. + * Do not set this too low, as managing many arenas will increase + * allocation and deallocation overhead. Setting it too high allocates + * more locked memory from the OS than strictly necessary. + */ + static const size_t ARENA_SIZE = 256*1024; + /** Chunk alignment. Another compromise. Setting this too high will waste + * memory, setting it too low will facilitate fragmentation. + */ + static const size_t ARENA_ALIGN = 16; + + /** Callback when allocation succeeds but locking fails. + */ + typedef bool (*LockingFailed_Callback)(); + + /** Memory statistics. */ + struct Stats + { + size_t used; + size_t free; + size_t total; + size_t locked; + size_t chunks_used; + size_t chunks_free; + }; + + /** Create a new LockedPool. This takes ownership of the MemoryPageLocker, + * you can only instantiate this with LockedPool(std::move(...)). + * + * The second argument is an optional callback when locking a newly allocated arena failed. + * If this callback is provided and returns false, the allocation fails (hard fail), if + * it returns true the allocation proceeds, but it could warn. 
+ */ + LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = 0); + ~LockedPool(); + + /** Allocate size bytes from this arena. + * Returns pointer on success, or 0 if memory is full or + * the application tried to allocate 0 bytes. + */ + void* alloc(size_t size); + + /** Free a previously allocated chunk of memory. + * Freeing the zero pointer has no effect. + * Raises std::runtime_error in case of error. + */ + void free(void *ptr); + + /** Get pool usage statistics */ + Stats stats() const; +private: + LockedPool(const LockedPool& other) = delete; // non construction-copyable + LockedPool& operator=(const LockedPool&) = delete; // non copyable + + std::unique_ptr<LockedPageAllocator> allocator; + + /** Create an arena from locked pages */ + class LockedPageArena: public Arena + { + public: + LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align); + ~LockedPageArena(); + private: + void *base; + size_t size; + LockedPageAllocator *allocator; + }; + + bool new_arena(size_t size, size_t align); + + std::list<LockedPageArena> arenas; + LockingFailed_Callback lf_cb; + size_t cumulative_bytes_locked; + /** Mutex protects access to this pool's data structures, including arenas. + */ + mutable std::mutex mutex; +}; + +/** + * Singleton class to keep track of locked (ie, non-swappable) memory, for use in + * std::allocator templates. + * + * Some implementations of the STL allocate memory in some constructors (i.e., see + * MSVC's vector<T> implementation where it allocates 1 byte of memory in the allocator.) + * Due to the unpredictable order of static initializers, we have to make sure the + * LockedPoolManager instance exists before any other STL-based objects that use + * secure_allocator are created. So instead of having LockedPoolManager also be + * static-initialized, it is created on demand. + */ +class LockedPoolManager : public LockedPool +{ +public: + /** Return the current instance, or create it once */ + static LockedPoolManager& Instance() + { + std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance); + return *LockedPoolManager::_instance; + } + +private: + LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator); + + /** Create a new LockedPoolManager specialized to the OS */ + static void CreateInstance(); + /** Called when locking fails, warn the user here */ + static bool LockingFailed(); + + static LockedPoolManager* _instance; + static std::once_flag init_flag; +}; + +#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H diff --git a/src/support/pagelocker.cpp b/src/support/pagelocker.cpp deleted file mode 100644 index 7cea2d88c5..0000000000 --- a/src/support/pagelocker.cpp +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2009-2015 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -#include "support/pagelocker.h" - -#if defined(HAVE_CONFIG_H) -#include "config/bitcoin-config.h" -#endif - -#ifdef WIN32 -#ifdef _WIN32_WINNT -#undef _WIN32_WINNT -#endif -#define _WIN32_WINNT 0x0501 -#define WIN32_LEAN_AND_MEAN 1 -#ifndef NOMINMAX -#define NOMINMAX -#endif -#include <windows.h> -// This is used to attempt to keep keying material out of swap -// Note that VirtualLock does not provide this as a guarantee on Windows, -// but, in practice, memory that has been VirtualLock'd almost never gets written to -// the pagefile except in rare circumstances where memory is extremely low. -#else -#include <sys/mman.h> -#include <limits.h> // for PAGESIZE -#include <unistd.h> // for sysconf -#endif - -LockedPageManager* LockedPageManager::_instance = NULL; -boost::once_flag LockedPageManager::init_flag = BOOST_ONCE_INIT; - -/** Determine system page size in bytes */ -static inline size_t GetSystemPageSize() -{ - size_t page_size; -#if defined(WIN32) - SYSTEM_INFO sSysInfo; - GetSystemInfo(&sSysInfo); - page_size = sSysInfo.dwPageSize; -#elif defined(PAGESIZE) // defined in limits.h - page_size = PAGESIZE; -#else // assume some POSIX OS - page_size = sysconf(_SC_PAGESIZE); -#endif - return page_size; -} - -bool MemoryPageLocker::Lock(const void* addr, size_t len) -{ -#ifdef WIN32 - return VirtualLock(const_cast<void*>(addr), len) != 0; -#else - return mlock(addr, len) == 0; -#endif -} - -bool MemoryPageLocker::Unlock(const void* addr, size_t len) -{ -#ifdef WIN32 - return VirtualUnlock(const_cast<void*>(addr), len) != 0; -#else - return munlock(addr, len) == 0; -#endif -} - -LockedPageManager::LockedPageManager() : LockedPageManagerBase<MemoryPageLocker>(GetSystemPageSize()) -{ -} diff --git a/src/support/pagelocker.h b/src/support/pagelocker.h deleted file mode 100644 index 6b3979e551..0000000000 --- a/src/support/pagelocker.h +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2015 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -#ifndef BITCOIN_SUPPORT_PAGELOCKER_H -#define BITCOIN_SUPPORT_PAGELOCKER_H - -#include "support/cleanse.h" - -#include <map> - -#include <boost/thread/mutex.hpp> -#include <boost/thread/once.hpp> - -/** - * Thread-safe class to keep track of locked (ie, non-swappable) memory pages. - * - * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock() - * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when - * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page. - * - * @note By using a map from each page base address to lock count, this class is optimized for - * small objects that span up to a few pages, mostly smaller than a page. To support large allocations, - * something like an interval tree would be the preferred data structure. 
- */ -template <class Locker> -class LockedPageManagerBase -{ -public: - LockedPageManagerBase(size_t page_size) : page_size(page_size) - { - // Determine bitmask for extracting page from address - assert(!(page_size & (page_size - 1))); // size must be power of two - page_mask = ~(page_size - 1); - } - - ~LockedPageManagerBase() - { - } - - - // For all pages in affected range, increase lock count - void LockRange(void* p, size_t size) - { - boost::mutex::scoped_lock lock(mutex); - if (!size) - return; - const size_t base_addr = reinterpret_cast<size_t>(p); - const size_t start_page = base_addr & page_mask; - const size_t end_page = (base_addr + size - 1) & page_mask; - for (size_t page = start_page; page <= end_page; page += page_size) { - Histogram::iterator it = histogram.find(page); - if (it == histogram.end()) // Newly locked page - { - locker.Lock(reinterpret_cast<void*>(page), page_size); - histogram.insert(std::make_pair(page, 1)); - } else // Page was already locked; increase counter - { - it->second += 1; - } - } - } - - // For all pages in affected range, decrease lock count - void UnlockRange(void* p, size_t size) - { - boost::mutex::scoped_lock lock(mutex); - if (!size) - return; - const size_t base_addr = reinterpret_cast<size_t>(p); - const size_t start_page = base_addr & page_mask; - const size_t end_page = (base_addr + size - 1) & page_mask; - for (size_t page = start_page; page <= end_page; page += page_size) { - Histogram::iterator it = histogram.find(page); - assert(it != histogram.end()); // Cannot unlock an area that was not locked - // Decrease counter for page, when it is zero, the page will be unlocked - it->second -= 1; - if (it->second == 0) // Nothing on the page anymore that keeps it locked - { - // Unlock page and remove the count from histogram - locker.Unlock(reinterpret_cast<void*>(page), page_size); - histogram.erase(it); - } - } - } - - // Get number of locked pages for diagnostics - int GetLockedPageCount() - { - boost::mutex::scoped_lock lock(mutex); - return histogram.size(); - } - -private: - Locker locker; - boost::mutex mutex; - size_t page_size, page_mask; - // map of page base address to lock count - typedef std::map<size_t, int> Histogram; - Histogram histogram; -}; - - -/** - * OS-dependent memory page locking/unlocking. - * Defined as policy class to make stubbing for test possible. - */ -class MemoryPageLocker -{ -public: - /** Lock memory pages. - * addr and len must be a multiple of the system page size - */ - bool Lock(const void* addr, size_t len); - /** Unlock memory pages. - * addr and len must be a multiple of the system page size - */ - bool Unlock(const void* addr, size_t len); -}; - -/** - * Singleton class to keep track of locked (ie, non-swappable) memory pages, for use in - * std::allocator templates. - * - * Some implementations of the STL allocate memory in some constructors (i.e., see - * MSVC's vector<T> implementation where it allocates 1 byte of memory in the allocator.) - * Due to the unpredictable order of static initializers, we have to make sure the - * LockedPageManager instance exists before any other STL-based objects that use - * secure_allocator are created. So instead of having LockedPageManager also be - * static-initialized, it is created on demand. 
- */ -class LockedPageManager : public LockedPageManagerBase<MemoryPageLocker> -{ -public: - static LockedPageManager& Instance() - { - boost::call_once(LockedPageManager::CreateInstance, LockedPageManager::init_flag); - return *LockedPageManager::_instance; - } - -private: - LockedPageManager(); - - static void CreateInstance() - { - // Using a local static instance guarantees that the object is initialized - // when it's first needed and also deinitialized after all objects that use - // it are done with it. I can think of one unlikely scenario where we may - // have a static deinitialization order/problem, but the check in - // LockedPageManagerBase's destructor helps us detect if that ever happens. - static LockedPageManager instance; - LockedPageManager::_instance = &instance; - } - - static LockedPageManager* _instance; - static boost::once_flag init_flag; -}; - -// -// Functions for directly locking/unlocking memory objects. -// Intended for non-dynamically allocated structures. -// -template <typename T> -void LockObject(const T& t) -{ - LockedPageManager::Instance().LockRange((void*)(&t), sizeof(T)); -} - -template <typename T> -void UnlockObject(const T& t) -{ - memory_cleanse((void*)(&t), sizeof(T)); - LockedPageManager::Instance().UnlockRange((void*)(&t), sizeof(T)); -} - -#endif // BITCOIN_SUPPORT_PAGELOCKER_H |
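Appended review notes with small, hedged usage sketches; none of these ship with the change. First, the chunk bookkeeping in the new Arena: alloc() carves the request, rounded up to the alignment, out of the end of the first free chunk that fits, and free() merges the returned chunk with adjacent free neighbours through the sorted map. A standalone illustration against the Arena interface declared in lockedpool.h, backed by plain heap memory:

```cpp
// Hedged sketch: exercises the public Arena interface from lockedpool.h.
#include "support/lockedpool.h"

#include <cassert>
#include <vector>

int main()
{
    // Back the arena with ordinary heap memory; the locked-page plumbing is
    // only involved when LockedPool creates LockedPageArena instances.
    std::vector<char> backing(4096);
    Arena arena(backing.data(), backing.size(), 16);

    void* a = arena.alloc(100); // rounded up to 112 (next multiple of 16)
    void* b = arena.alloc(100); // carved from the shrinking free chunk
    assert(a && b && a != b);

    arena.free(a);
    arena.free(b);              // adjacent free chunks are merged again

    Arena::Stats s = arena.stats();
    assert(s.used == 0 && s.chunks_free == 1 && s.total == backing.size());
    return 0;
}
```

The assertions only restate what stats() reports once the two frees have coalesced back into a single free chunk spanning the whole arena.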
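Second, the RAII wrappers added in src/support/events.h: MAKE_RAII generates a std::unique_ptr alias whose deleter calls the matching libevent *_free function, and the obtain_* helpers either throw (event_base, connection) or hand back a possibly-null handle (the rest). A minimal, hedged usage sketch:

```cpp
// Hedged sketch: single-translation-unit use of the new RAII helpers.
#include "support/events.h"

#include <cstdio>

int main()
{
    // obtain_event_base() throws if libevent cannot allocate the base, so the
    // happy path below can assume a valid pointer.
    raii_event_base base = obtain_event_base();

    // obtain_evhttp() does not throw; the unique_ptr is simply null on failure.
    // Both handles are freed automatically in reverse declaration order.
    raii_evhttp http = obtain_evhttp(base.get());
    if (!http) {
        std::fprintf(stderr, "evhttp_new failed\n");
        return 1;
    }
    return 0;
}
```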
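Finally, the LockingFailed_Callback contract documented in lockedpool.h: when a new arena is allocated but cannot be locked, a callback returning false makes the pool free the arena and fail the allocation, while returning true accepts the unlocked arena and proceeds. A hedged sketch using a hypothetical LockedPageAllocator test double (UnlockablePageAllocator and both callbacks are invented here purely for illustration):

```cpp
// Hedged sketch: illustrates the locking-failed callback semantics only.
#include "support/lockedpool.h"

#include <cassert>
#include <cstdlib>
#include <limits>

// Test double that never manages to lock pages (hypothetical, not shipped).
// Unlike the real allocators it does not cleanse memory in FreeLocked.
class UnlockablePageAllocator : public LockedPageAllocator
{
public:
    void* AllocateLocked(size_t len, bool* lockingSuccess) override
    {
        *lockingSuccess = false;      // pretend mlock/VirtualLock failed
        return std::malloc(len);
    }
    void FreeLocked(void* addr, size_t len) override { std::free(addr); }
    size_t GetLimit() override { return std::numeric_limits<size_t>::max(); }
};

static bool warn_and_continue() { return true; }  // user warned, keep the arena
static bool refuse() { return false; }            // veto the unlocked arena

int main()
{
    LockedPool tolerant(std::unique_ptr<LockedPageAllocator>(new UnlockablePageAllocator()), warn_and_continue);
    assert(tolerant.alloc(32) != nullptr);  // arena kept despite lock failure

    LockedPool strict(std::unique_ptr<LockedPageAllocator>(new UnlockablePageAllocator()), refuse);
    assert(strict.alloc(32) == nullptr);    // callback vetoed the unlocked arena
    return 0;
}
```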