Diffstat (limited to 'src/support')
-rw-r--r--   src/support/allocators/secure.h         |  8
-rw-r--r--   src/support/allocators/zeroafterfree.h  |  8
-rw-r--r--   src/support/cleanse.cpp                 | 30
-rw-r--r--   src/support/lockedpool.h                | 16
4 files changed, 44 insertions(+), 18 deletions(-)
diff --git a/src/support/allocators/secure.h b/src/support/allocators/secure.h
index f20f424941..39347c73bb 100644
--- a/src/support/allocators/secure.h
+++ b/src/support/allocators/secure.h
@@ -26,13 +26,13 @@ struct secure_allocator : public std::allocator<T> {
typedef typename base::reference reference;
typedef typename base::const_reference const_reference;
typedef typename base::value_type value_type;
- secure_allocator() throw() {}
- secure_allocator(const secure_allocator& a) throw() : base(a) {}
+ secure_allocator() noexcept {}
+ secure_allocator(const secure_allocator& a) noexcept : base(a) {}
template <typename U>
- secure_allocator(const secure_allocator<U>& a) throw() : base(a)
+ secure_allocator(const secure_allocator<U>& a) noexcept : base(a)
{
}
- ~secure_allocator() throw() {}
+ ~secure_allocator() noexcept {}
template <typename _Other>
struct rebind {
typedef secure_allocator<_Other> other;
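The change above swaps the C++98 dynamic exception specification throw() for noexcept: throw() was deprecated in C++11 and removed in C++17, while noexcept expresses the same no-throw guarantee in a form the compiler and the type traits can use. A minimal standalone sketch of the idiom, not taken from the patch:

    #include <type_traits>

    // Stand-in for the allocator above: special members that cannot throw
    // are marked noexcept instead of the deprecated throw().
    template <typename T>
    struct example_allocator {
        example_allocator() noexcept {}
        example_allocator(const example_allocator&) noexcept {}
        ~example_allocator() noexcept {}
    };

    // The no-throw guarantee is now visible to the type system.
    static_assert(std::is_nothrow_default_constructible<example_allocator<char>>::value,
                  "default construction must not throw");
    static_assert(std::is_nothrow_copy_constructible<example_allocator<char>>::value,
                  "copy construction must not throw");

    int main() { return 0; }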
diff --git a/src/support/allocators/zeroafterfree.h b/src/support/allocators/zeroafterfree.h
index 581d5d6318..618874ceee 100644
--- a/src/support/allocators/zeroafterfree.h
+++ b/src/support/allocators/zeroafterfree.h
@@ -22,13 +22,13 @@ struct zero_after_free_allocator : public std::allocator<T> {
typedef typename base::reference reference;
typedef typename base::const_reference const_reference;
typedef typename base::value_type value_type;
- zero_after_free_allocator() throw() {}
- zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) {}
+ zero_after_free_allocator() noexcept {}
+ zero_after_free_allocator(const zero_after_free_allocator& a) noexcept : base(a) {}
template <typename U>
- zero_after_free_allocator(const zero_after_free_allocator<U>& a) throw() : base(a)
+ zero_after_free_allocator(const zero_after_free_allocator<U>& a) noexcept : base(a)
{
}
- ~zero_after_free_allocator() throw() {}
+ ~zero_after_free_allocator() noexcept {}
template <typename _Other>
struct rebind {
typedef zero_after_free_allocator<_Other> other;
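zero_after_free_allocator receives the same throw() to noexcept treatment. Its job is to wipe a buffer (via memory_cleanse, see cleanse.cpp below) before handing the memory back to the underlying allocator, so data such as serialized keys does not linger in freed heap memory. A rough usage sketch, assuming the Bitcoin Core source tree is on the include path; the typedef name serialize_buffer is illustrative, not from the patch:

    #include <vector>
    #include "support/allocators/zeroafterfree.h"

    // A byte buffer whose backing storage is zeroed before being freed, so
    // secrets that passed through it do not survive in released memory.
    typedef std::vector<char, zero_after_free_allocator<char> > serialize_buffer;

    int main()
    {
        serialize_buffer buf(32, 0x42); // stand-in for sensitive bytes
        // ... serialize or deserialize through buf ...
        return 0;                       // destructor wipes, then frees
    }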
diff --git a/src/support/cleanse.cpp b/src/support/cleanse.cpp
index a2141b2449..95899c9f02 100644
--- a/src/support/cleanse.cpp
+++ b/src/support/cleanse.cpp
@@ -5,9 +5,35 @@
#include "cleanse.h"
-#include <openssl/crypto.h>
+#include <cstring>
+/* Compilers have a bad habit of removing "superfluous" memset calls that
+ * are trying to zero memory. For example, when memset()ing a buffer and
+ * then free()ing it, the compiler might decide that the memset is
+ * unobservable and thus can be removed.
+ *
+ * Previously we used OpenSSL which tried to stop this by a) implementing
+ * memset in assembly on x86 and b) putting the function in its own file
+ * for other platforms.
+ *
+ * This change removes those tricks in favour of using asm directives to
+ * scare the compiler away. As best as our compiler folks can tell, this is
+ * sufficient and will continue to be so.
+ *
+ * Adam Langley <agl@google.com>
+ * Commit: ad1907fe73334d6c696c8539646c21b11178f20f
+ * BoringSSL (LICENSE: ISC)
+ */
void memory_cleanse(void *ptr, size_t len)
{
- OPENSSL_cleanse(ptr, len);
+ std::memset(ptr, 0, len);
+
+ /* As best as we can tell, this is sufficient to break any optimisations that
+ might try to eliminate "superfluous" memsets. If there's an easy way to
+ detect memset_s, it would be better to use that. */
+#if defined(_MSC_VER)
+ __asm;
+#else
+ __asm__ __volatile__("" : : "r"(ptr) : "memory");
+#endif
}
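The replacement body relies on the classic compiler-barrier trick: the memset does the zeroing, and the empty asm statement that claims to read ptr and clobber memory stops the optimizer from proving those stores dead and deleting them. Below is a standalone sketch of the same technique, not taken from the patch; it shows only the GCC/Clang form, whereas the patch uses a bare __asm; for MSVC:

    #include <cstddef>
    #include <cstring>

    // Same barrier technique as memory_cleanse() above: without the asm
    // statement, a compiler may notice that the buffer is never read after
    // the memset and drop the zeroing entirely.
    static void wipe(void* p, std::size_t n)
    {
        std::memset(p, 0, n);
    #if !defined(_MSC_VER)
        // Pretends to read p and to touch all of memory, so the preceding
        // stores cannot be treated as dead.
        __asm__ __volatile__("" : : "r"(p) : "memory");
    #endif
    }

    int main()
    {
        unsigned char key[32] = {0};
        // ... fill key with secret material and use it ...
        wipe(key, sizeof(key)); // the barrier keeps this from being elided
        return 0;
    }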
diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h
index f5212bc266..834f0371e2 100644
--- a/src/support/lockedpool.h
+++ b/src/support/lockedpool.h
@@ -50,6 +50,9 @@ public:
Arena(void *base, size_t size, size_t alignment);
virtual ~Arena();
+ Arena(const Arena& other) = delete; // non construction-copyable
+ Arena& operator=(const Arena&) = delete; // non copyable
+
/** Memory statistics. */
struct Stats
{
@@ -85,9 +88,6 @@ public:
*/
bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; }
private:
- Arena(const Arena& other) = delete; // non construction-copyable
- Arena& operator=(const Arena&) = delete; // non copyable
-
/** Map of chunk address to chunk information. This class makes use of the
* sorted order to merge previous and next chunks during deallocation.
*/
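Moving the deleted copy constructor and copy assignment from the private section to the public one follows the usual C++11 convention: with = delete doing the work, public placement makes an attempted copy fail with "use of deleted function" rather than a misleading access error. A small illustration, not from the patch:

    class ArenaLike {
    public:
        ArenaLike() {}
        ArenaLike(const ArenaLike&) = delete;            // non construction-copyable
        ArenaLike& operator=(const ArenaLike&) = delete; // non copyable
    };

    int main()
    {
        ArenaLike a;
        // ArenaLike b = a; // error: use of deleted function, not an access error
        return 0;
    }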
@@ -150,9 +150,12 @@ public:
* If this callback is provided and returns false, the allocation fails (hard fail), if
* it returns true the allocation proceeds, but it could warn.
*/
- LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = 0);
+ explicit LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = nullptr);
~LockedPool();
+ LockedPool(const LockedPool& other) = delete; // non construction-copyable
+ LockedPool& operator=(const LockedPool&) = delete; // non copyable
+
/** Allocate size bytes from this arena.
* Returns pointer on success, or 0 if memory is full or
* the application tried to allocate 0 bytes.
@@ -168,9 +171,6 @@ public:
/** Get pool usage statistics */
Stats stats() const;
private:
- LockedPool(const LockedPool& other) = delete; // non construction-copyable
- LockedPool& operator=(const LockedPool&) = delete; // non copyable
-
std::unique_ptr<LockedPageAllocator> allocator;
/** Create an arena from locked pages */
@@ -217,7 +217,7 @@ public:
}
private:
- LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator);
+ explicit LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator);
/** Create a new LockedPoolManager specialized to the OS */
static void CreateInstance();
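The constructor changes make LockedPool and LockedPoolManager construction explicit and use nullptr rather than the literal 0 as the default LockingFailed_Callback. The sketch below, which is not from the patch, shows roughly how a caller-supplied callback fits the behaviour described in the comment above (false means hard failure, true means proceed but possibly warn); Pool, PageAllocator and the bool() callback signature are simplified stand-ins for the real classes:

    #include <memory>
    #include <utility>

    struct PageAllocator { virtual ~PageAllocator() {} }; // stand-in for LockedPageAllocator

    typedef bool (*LockingFailedCallback)(); // assumed shape of the callback

    class Pool { // simplified stand-in for LockedPool
    public:
        explicit Pool(std::unique_ptr<PageAllocator> allocator_in,
                      LockingFailedCallback lf_cb_in = nullptr)
            : allocator(std::move(allocator_in)), lf_cb(lf_cb_in) {}
    private:
        std::unique_ptr<PageAllocator> allocator;
        LockingFailedCallback lf_cb;
    };

    static bool WarnOnLockingFailure()
    {
        // Returning true lets the allocation proceed (ideally after warning
        // that secret data may end up in swap); false would fail it hard.
        return true;
    }

    int main()
    {
        Pool pool(std::unique_ptr<PageAllocator>(new PageAllocator()),
                  &WarnOnLockingFailure);
        // With explicit, the pool cannot be created by accident through an
        // implicit conversion from the unique_ptr argument.
        return 0;
    }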