author    Chuck <chuck@borboggle.com>  2013-10-01 17:23:17 +0700
committer Chuck <chuck@borboggle.com>  2013-10-20 14:29:24 +0700
commit    0b8f47dc531d3cbaf172a5e17f27524a40833dba (patch)
tree      eb88d649af3af86b65f914206dfe30ad06230d33 /src/allocators.h
parent    896853a011f6681d41bc585e020d74a7f2fece88 (diff)
Changing LockedPageManager to use a managed instance
This ensures the allocator is ready no matter when it's needed, as some STL implementations allocate in their constructors (e.g., MSVC's STL in debug builds). Using boost::call_once to guarantee thread-safe initialization of the instance. Adding comments describing why the change was made. Addressing deinitialization of the LockedPageManager object by creating it as a function-local static and adding an assert in the base class's destructor to catch pages still locked at shutdown.
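As a standalone sketch, the pattern this commit introduces looks roughly like the following. This is illustrative rather than the patch itself: the Manager name and locked_count bookkeeping are placeholders, and Boost.Thread is assumed to be available.

#include <boost/thread/once.hpp>
#include <assert.h>
#include <stddef.h>

class Manager
{
public:
    // Thread-safe, on-demand accessor: boost::call_once guarantees that
    // CreateInstance runs exactly once, even if several threads race here.
    // (The argument order matches the older Boost overload the patch uses.)
    static Manager& Instance()
    {
        boost::call_once(Manager::CreateInstance, Manager::init_flag);
        return *Manager::_instance;
    }

    ~Manager() { assert(locked_count == 0); } // mirrors the base-class check

    void Lock()   { ++locked_count; }
    void Unlock() { --locked_count; }

private:
    Manager() : locked_count(0) {}

    static void CreateInstance()
    {
        // A function-local static is constructed on first use and destroyed
        // after any statics constructed later, so objects that used the
        // manager get a chance to release their locks first.
        static Manager instance;
        Manager::_instance = &instance;
    }

    int locked_count;
    static Manager* _instance;
    static boost::once_flag init_flag;
};

// Out-of-line definitions; in this commit the real ones would live in a
// .cpp file (util.cpp, per the comment the patch removes below).
Manager* Manager::_instance = NULL;
boost::once_flag Manager::init_flag = BOOST_ONCE_INIT;

int main()
{
    Manager::Instance().Lock();   // first call constructs the singleton
    Manager::Instance().Unlock(); // leave the count at zero so the assert passes
    return 0;
}

The point of the pairing is that call_once gives thread-safe construction, while the function-local static gives a destruction point late enough for the destructor assert to be meaningful.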
Diffstat (limited to 'src/allocators.h')
-rw-r--r--  src/allocators.h | 43
1 file changed, 38 insertions(+), 5 deletions(-)
diff --git a/src/allocators.h b/src/allocators.h
index fd6f51b27e..b199f2dd44 100644
--- a/src/allocators.h
+++ b/src/allocators.h
@@ -8,6 +8,7 @@
#include <string.h>
#include <string>
#include <boost/thread/mutex.hpp>
+#include <boost/thread/once.hpp>
#include <map>
#include <openssl/crypto.h> // for OPENSSL_cleanse()
@@ -34,6 +35,12 @@ public:
        page_mask = ~(page_size - 1);
    }
+    ~LockedPageManagerBase()
+    {
+        assert(this->GetLockedPageCount() == 0);
+    }
+
+
    // For all pages in affected range, increase lock count
    void LockRange(void *p, size_t size)
    {
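The base class's page arithmetic deserves a quick illustration: with page_mask = ~(page_size - 1), the first and last pages touched by a buffer fall out of a simple bitwise AND, and LockRange can then bump a lock count per page. A minimal sketch, with a 4096-byte page size assumed purely for illustration (the real class queries it at runtime):

#include <stdio.h>
#include <stddef.h>

int main()
{
    const size_t page_size = 4096;             // assumed for the example
    const size_t page_mask = ~(page_size - 1); // same formula as the constructor

    char secret[100];
    size_t base_addr  = (size_t)(void*)secret;
    size_t start_page = base_addr & page_mask;                        // first page
    size_t end_page   = (base_addr + sizeof(secret) - 1) & page_mask; // last page

    printf("lock pages 0x%zx through 0x%zx\n", start_page, end_page);
    return 0;
}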
@@ -117,13 +124,39 @@ public:
/**
 * Singleton class to keep track of locked (i.e., non-swappable) memory pages, for use in
 * std::allocator templates.
+ *
+ * Some implementations of the STL allocate memory in some constructors (e.g., see
+ * MSVC's vector<T> implementation, which allocates 1 byte of memory in the allocator).
+ * Due to the unpredictable order of static initializers, we have to make sure the
+ * LockedPageManager instance exists before any other STL-based objects that use
+ * secure_allocator are created. So instead of having LockedPageManager also be
+ * static-initialized, it is created on demand.
 */
class LockedPageManager: public LockedPageManagerBase<MemoryPageLocker>
{
public:
-    static LockedPageManager instance; // instantiated in util.cpp
+    static LockedPageManager& Instance()
+    {
+        boost::call_once(LockedPageManager::CreateInstance, LockedPageManager::init_flag);
+        return *LockedPageManager::_instance;
+    }
+
private:
    LockedPageManager();
+
+    static void CreateInstance()
+    {
+        // Using a local static instance guarantees that the object is initialized
+        // when it's first needed and also deinitialized after all objects that use
+        // it are done with it. There is one unlikely scenario in which a static
+        // deinitialization ordering problem could still arise, but the assert in
+        // LockedPageManagerBase's destructor helps us detect it if that ever happens.
+        static LockedPageManager instance;
+        LockedPageManager::_instance = &instance;
+    }
+
+    static LockedPageManager* _instance;
+    static boost::once_flag init_flag;
};
//
@@ -131,12 +164,12 @@ private:
// Intended for non-dynamically allocated structures.
//
template<typename T> void LockObject(const T &t) {
-    LockedPageManager::instance.LockRange((void*)(&t), sizeof(T));
+    LockedPageManager::Instance().LockRange((void*)(&t), sizeof(T));
}
template<typename T> void UnlockObject(const T &t) {
    OPENSSL_cleanse((void*)(&t), sizeof(T));
-    LockedPageManager::instance.UnlockRange((void*)(&t), sizeof(T));
+    LockedPageManager::Instance().UnlockRange((void*)(&t), sizeof(T));
}
//
@@ -168,7 +201,7 @@ struct secure_allocator : public std::allocator<T>
        T *p;
        p = std::allocator<T>::allocate(n, hint);
        if (p != NULL)
-            LockedPageManager::instance.LockRange(p, sizeof(T) * n);
+            LockedPageManager::Instance().LockRange(p, sizeof(T) * n);
        return p;
    }
@@ -177,7 +210,7 @@ struct secure_allocator : public std::allocator<T>
        if (p != NULL)
        {
            OPENSSL_cleanse(p, sizeof(T) * n);
-            LockedPageManager::instance.UnlockRange(p, sizeof(T) * n);
+            LockedPageManager::Instance().UnlockRange(p, sizeof(T) * n);
        }
        std::allocator<T>::deallocate(p, n);
    }
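With these changes, call sites never name a global instance; everything funnels through Instance(), so the singleton exists before the first locked allocation. A hedged usage sketch (the SecureString typedef mirrors the one allocators.h builds on secure_allocator outside this diff):

#include "allocators.h"
#include <string>

// A string type whose backing store is locked against swapping and
// wiped with OPENSSL_cleanse() when freed.
typedef std::basic_string<char, std::char_traits<char>,
                          secure_allocator<char> > SecureString;

int main()
{
    // The first allocation calls LockedPageManager::Instance(), creating
    // the singleton via boost::call_once before any pages are locked.
    SecureString passphrase("correct horse battery staple");
    // ... use the passphrase ...
    return 0;
}   // deallocate() cleanses the buffer, then unlocks its pages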