author     Wladimir J. van der Laan <laanwj@gmail.com>    2012-08-22 11:34:32 +0200
committer  Wladimir J. van der Laan <laanwj@gmail.com>    2012-08-23 06:55:35 +0200
commit     e95568b78ddead0173339ca98df6cd92131ceb62
tree       3860dc256f0c80a4764ef79bb1fa6fa3ec814392    /src/test/allocator_tests.cpp
parent     fedd060d819e671388e9996f55af7a70f270f0b9
Handle locked pages more robustly (Fixes issue #1462)
Memory locks do not stack: pages that have been locked several times by calls to mlock() will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when those functions are used naively. In this commit, a class "LockedPageManager" is added that simulates stacking memory locks by keeping a counter per page.
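For illustration, the stacking behaviour described above can be sketched as a per-page reference count that only issues the underlying lock on the first use of a page and the underlying unlock on the last. The class and names below (LockedPageCounterSketch) are illustrative only, assume a power-of-two page size and balanced lock/unlock calls, and are not the actual LockedPageManagerBase implementation:

    // Illustrative sketch only -- not the actual LockedPageManagerBase code.
    // Assumes page_size is a power of two and that locks/unlocks are balanced.
    #include <map>
    #include <cstddef>

    template <class Locker>
    class LockedPageCounterSketch
    {
    public:
        explicit LockedPageCounterSketch(size_t page_size):
            page_size(page_size), page_mask(~(page_size - 1)) {}

        // Lock every page touched by [p, p+size); only the first lock of a
        // page reaches the OS-level locker.
        void LockRange(void *p, size_t size)
        {
            if (!size) return;
            const size_t start = reinterpret_cast<size_t>(p) & page_mask;
            const size_t end = (reinterpret_cast<size_t>(p) + size - 1) & page_mask;
            for (size_t page = start; page <= end; page += page_size)
                if (counts[page]++ == 0)
                    locker.Lock(reinterpret_cast<void*>(page), page_size);
        }

        // Unlock every page touched by [p, p+size); only the last unlock of a
        // page reaches the OS-level locker.
        void UnlockRange(void *p, size_t size)
        {
            if (!size) return;
            const size_t start = reinterpret_cast<size_t>(p) & page_mask;
            const size_t end = (reinterpret_cast<size_t>(p) + size - 1) & page_mask;
            for (size_t page = start; page <= end; page += page_size)
                if (--counts[page] == 0)
                {
                    locker.Unlock(reinterpret_cast<void*>(page), page_size);
                    counts.erase(page);
                }
        }

        size_t GetLockedPageCount() const { return counts.size(); }

    private:
        Locker locker;
        std::map<size_t, int> counts; // page base address -> outstanding lock count
        size_t page_size, page_mask;
    };

The test added below exercises this contract through a dummy TestLocker: it checks that only whole pages reach the locker, that the locked-page count matches the pages actually touched, and that the count returns to zero after matching unlocks.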
Diffstat (limited to 'src/test/allocator_tests.cpp')
-rw-r--r--  src/test/allocator_tests.cpp | 115
1 file changed, 115 insertions(+), 0 deletions(-)
diff --git a/src/test/allocator_tests.cpp b/src/test/allocator_tests.cpp
new file mode 100644
index 0000000000..d5cb8e8101
--- /dev/null
+++ b/src/test/allocator_tests.cpp
@@ -0,0 +1,115 @@
+#include <boost/test/unit_test.hpp>
+
+#include "init.h"
+#include "main.h"
+#include "util.h"
+
+BOOST_AUTO_TEST_SUITE(allocator_tests)
+
+// Dummy memory page locker for platform independent tests
+static const void *last_lock_addr, *last_unlock_addr;
+static size_t last_lock_len, last_unlock_len;
+class TestLocker
+{
+public:
+    bool Lock(const void *addr, size_t len)
+    {
+        last_lock_addr = addr;
+        last_lock_len = len;
+        return true;
+    }
+    bool Unlock(const void *addr, size_t len)
+    {
+        last_unlock_addr = addr;
+        last_unlock_len = len;
+        return true;
+    }
+};
+
+BOOST_AUTO_TEST_CASE(test_LockedPageManagerBase)
+{
+    const size_t test_page_size = 4096;
+    LockedPageManagerBase<TestLocker> lpm(test_page_size);
+    size_t addr;
+    last_lock_addr = last_unlock_addr = 0;
+    last_lock_len = last_unlock_len = 0;
+
+    /* Try large number of small objects */
+    addr = 0;
+    for(int i=0; i<1000; ++i)
+    {
+        lpm.LockRange(reinterpret_cast<void*>(addr), 33);
+        addr += 33;
+    }
+    /* Try small number of page-sized objects, straddling two pages */
+    addr = test_page_size*100 + 53;
+    for(int i=0; i<100; ++i)
+    {
+        lpm.LockRange(reinterpret_cast<void*>(addr), test_page_size);
+        addr += test_page_size;
+    }
+    /* Try small number of page-sized objects aligned to exactly one page */
+    addr = test_page_size*300;
+    for(int i=0; i<100; ++i)
+    {
+        lpm.LockRange(reinterpret_cast<void*>(addr), test_page_size);
+        addr += test_page_size;
+    }
+    /* one very large object, straddling pages */
+    lpm.LockRange(reinterpret_cast<void*>(test_page_size*600+1), test_page_size*500);
+    BOOST_CHECK(last_lock_addr == reinterpret_cast<void*>(test_page_size*(600+500)));
+    /* one very large object, page aligned */
+    lpm.LockRange(reinterpret_cast<void*>(test_page_size*1200), test_page_size*500-1);
+    BOOST_CHECK(last_lock_addr == reinterpret_cast<void*>(test_page_size*(1200+500-1)));
+
+    BOOST_CHECK(lpm.GetLockedPageCount() == (
+        (1000*33+test_page_size-1)/test_page_size + // small objects
+        101 + 100 +  // page-sized objects
+        501 + 500)); // large objects
+    BOOST_CHECK((last_lock_len & (test_page_size-1)) == 0); // always lock entire pages
+    BOOST_CHECK(last_unlock_len == 0); // nothing unlocked yet
+
+    /* And unlock again */
+    addr = 0;
+    for(int i=0; i<1000; ++i)
+    {
+        lpm.UnlockRange(reinterpret_cast<void*>(addr), 33);
+        addr += 33;
+    }
+    addr = test_page_size*100 + 53;
+    for(int i=0; i<100; ++i)
+    {
+        lpm.UnlockRange(reinterpret_cast<void*>(addr), test_page_size);
+        addr += test_page_size;
+    }
+    addr = test_page_size*300;
+    for(int i=0; i<100; ++i)
+    {
+        lpm.UnlockRange(reinterpret_cast<void*>(addr), test_page_size);
+        addr += test_page_size;
+    }
+    lpm.UnlockRange(reinterpret_cast<void*>(test_page_size*600+1), test_page_size*500);
+    lpm.UnlockRange(reinterpret_cast<void*>(test_page_size*1200), test_page_size*500-1);
+
+    /* Check that everything is released */
+    BOOST_CHECK(lpm.GetLockedPageCount() == 0);
+
+    /* A few locks and unlocks of size zero (should have no effect) */
+    addr = 0;
+    for(int i=0; i<1000; ++i)
+    {
+        lpm.LockRange(reinterpret_cast<void*>(addr), 0);
+        addr += 1;
+    }
+    BOOST_CHECK(lpm.GetLockedPageCount() == 0);
+    addr = 0;
+    for(int i=0; i<1000; ++i)
+    {
+        lpm.UnlockRange(reinterpret_cast<void*>(addr), 0);
+        addr += 1;
+    }
+    BOOST_CHECK(lpm.GetLockedPageCount() == 0);
+    BOOST_CHECK((last_unlock_len & (test_page_size-1)) == 0); // always unlock entire pages
+}
+
+BOOST_AUTO_TEST_SUITE_END()