Diffstat (limited to 'src/test/cuckoocache_tests.cpp')
-rw-r--r--  src/test/cuckoocache_tests.cpp  47
1 file changed, 16 insertions(+), 31 deletions(-)
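The change itself is mechanical: the file-local `FastRandomContext local_rand_ctx` and the hand-rolled `insecure_GetRandHash()` helper are dropped in favour of the shared test-framework RNG helpers (`SeedInsecureRand`, `InsecureRand256`, `InsecureRand32`) that the diff below switches to. A minimal sketch of the new pattern follows; the include path is an assumption (these helpers have lived in `test/test_bitcoin.h` and, in newer trees, `test/util/random.h`), and `MakeDeterministicHashes` is a hypothetical name used only for illustration.

```cpp
// Illustrative sketch only: the insecure-rand helpers have moved between
// headers across Bitcoin Core versions (test/test_bitcoin.h at the time of
// this change, test/util/random.h later), so treat the include as a placeholder.
#include <test/test_bitcoin.h>
#include <uint256.h>

#include <vector>

// Hypothetical helper showing the pattern the diff adopts: seed the shared
// test RNG once deterministically, then draw uint256 values directly instead
// of filling one 32 bits at a time from a file-local FastRandomContext.
static std::vector<uint256> MakeDeterministicHashes(size_t n)
{
    SeedInsecureRand(true); // deterministic seed, mirroring FastRandomContext(true)
    std::vector<uint256> hashes;
    hashes.reserve(n);
    for (size_t i = 0; i < n; ++i) {
        hashes.push_back(InsecureRand256()); // replaces the manual 8x rand32() fill
    }
    return hashes;
}
```

Because every call site reseeds with `SeedInsecureRand(true)`, each test still sees the same deterministic stream it saw before, just sourced from the shared context rather than a per-file one.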
diff --git a/src/test/cuckoocache_tests.cpp b/src/test/cuckoocache_tests.cpp
index dbceb9d2e0..d8286520ec 100644
--- a/src/test/cuckoocache_tests.cpp
+++ b/src/test/cuckoocache_tests.cpp
@@ -21,40 +21,23 @@
* using BOOST_CHECK_CLOSE to fail.
*
*/
-FastRandomContext local_rand_ctx(true);
-
BOOST_AUTO_TEST_SUITE(cuckoocache_tests);
-
-/** insecure_GetRandHash fills in a uint256 from local_rand_ctx
- */
-static void insecure_GetRandHash(uint256& t)
-{
- uint32_t* ptr = (uint32_t*)t.begin();
- for (uint8_t j = 0; j < 8; ++j)
- *(ptr++) = local_rand_ctx.rand32();
-}
-
-
-
/* Test that no values not inserted into the cache are read out of it.
*
* There are no repeats in the first 200000 insecure_GetRandHash calls
*/
BOOST_AUTO_TEST_CASE(test_cuckoocache_no_fakes)
{
- local_rand_ctx = FastRandomContext(true);
+ SeedInsecureRand(true);
CuckooCache::cache<uint256, SignatureCacheHasher> cc{};
size_t megabytes = 4;
cc.setup_bytes(megabytes << 20);
- uint256 v;
for (int x = 0; x < 100000; ++x) {
- insecure_GetRandHash(v);
- cc.insert(v);
+ cc.insert(InsecureRand256());
}
for (int x = 0; x < 100000; ++x) {
- insecure_GetRandHash(v);
- BOOST_CHECK(!cc.contains(v, false));
+ BOOST_CHECK(!cc.contains(InsecureRand256(), false));
}
};
@@ -64,7 +47,7 @@ BOOST_AUTO_TEST_CASE(test_cuckoocache_no_fakes)
template <typename Cache>
static double test_cache(size_t megabytes, double load)
{
- local_rand_ctx = FastRandomContext(true);
+ SeedInsecureRand(true);
std::vector<uint256> hashes;
Cache set{};
size_t bytes = megabytes * (1 << 20);
@@ -74,7 +57,7 @@ static double test_cache(size_t megabytes, double load)
for (uint32_t i = 0; i < n_insert; ++i) {
uint32_t* ptr = (uint32_t*)hashes[i].begin();
for (uint8_t j = 0; j < 8; ++j)
- *(ptr++) = local_rand_ctx.rand32();
+ *(ptr++) = InsecureRand32();
}
/** We make a copy of the hashes because future optimizations of the
* cuckoocache may overwrite the inserted element, so the test is
@@ -135,7 +118,7 @@ template <typename Cache>
static void test_cache_erase(size_t megabytes)
{
double load = 1;
- local_rand_ctx = FastRandomContext(true);
+ SeedInsecureRand(true);
std::vector<uint256> hashes;
Cache set{};
size_t bytes = megabytes * (1 << 20);
@@ -145,7 +128,7 @@ static void test_cache_erase(size_t megabytes)
for (uint32_t i = 0; i < n_insert; ++i) {
uint32_t* ptr = (uint32_t*)hashes[i].begin();
for (uint8_t j = 0; j < 8; ++j)
- *(ptr++) = local_rand_ctx.rand32();
+ *(ptr++) = InsecureRand32();
}
/** We make a copy of the hashes because future optimizations of the
* cuckoocache may overwrite the inserted element, so the test is
@@ -158,7 +141,7 @@ static void test_cache_erase(size_t megabytes)
set.insert(hashes_insert_copy[i]);
/** Erase the first quarter */
for (uint32_t i = 0; i < (n_insert / 4); ++i)
- set.contains(hashes[i], true);
+ BOOST_CHECK(set.contains(hashes[i], true));
/** Insert the second half */
for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
set.insert(hashes_insert_copy[i]);
@@ -198,7 +181,7 @@ template <typename Cache>
static void test_cache_erase_parallel(size_t megabytes)
{
double load = 1;
- local_rand_ctx = FastRandomContext(true);
+ SeedInsecureRand(true);
std::vector<uint256> hashes;
Cache set{};
size_t bytes = megabytes * (1 << 20);
@@ -208,7 +191,7 @@ static void test_cache_erase_parallel(size_t megabytes)
for (uint32_t i = 0; i < n_insert; ++i) {
uint32_t* ptr = (uint32_t*)hashes[i].begin();
for (uint8_t j = 0; j < 8; ++j)
- *(ptr++) = local_rand_ctx.rand32();
+ *(ptr++) = InsecureRand32();
}
/** We make a copy of the hashes because future optimizations of the
* cuckoocache may overwrite the inserted element, so the test is
@@ -237,8 +220,10 @@ static void test_cache_erase_parallel(size_t megabytes)
size_t ntodo = (n_insert/4)/3;
size_t start = ntodo*x;
size_t end = ntodo*(x+1);
- for (uint32_t i = start; i < end; ++i)
- set.contains(hashes[i], true);
+ for (uint32_t i = start; i < end; ++i) {
+ bool contains = set.contains(hashes[i], true);
+ assert(contains);
+ }
});
/** Wait for all threads to finish
@@ -300,7 +285,7 @@ static void test_cache_generations()
// iterations with non-deterministic values, so it isn't "overfit" to the
// specific entropy in FastRandomContext(true) and implementation of the
// cache.
- local_rand_ctx = FastRandomContext(true);
+ SeedInsecureRand(true);
// block_activity models a chunk of network activity. n_insert elements are
// added to the cache. The first and last n/4 are stored for removal later
@@ -317,7 +302,7 @@ static void test_cache_generations()
for (uint32_t i = 0; i < n_insert; ++i) {
uint32_t* ptr = (uint32_t*)inserts[i].begin();
for (uint8_t j = 0; j < 8; ++j)
- *(ptr++) = local_rand_ctx.rand32();
+ *(ptr++) = InsecureRand32();
}
for (uint32_t i = 0; i < n_insert / 4; ++i)
reads.push_back(inserts[i]);