author    Jeremy Rubin <jeremy.l.rubin@gmail.com>  2017-03-27 14:25:18 -0400
committer Jeremy Rubin <jeremy.l.rubin@gmail.com>  2017-08-09 02:07:11 -0700
commit    8c2f4b88828b3e40f6cc690261657e66b2653432 (patch)
tree      6ec9c7632491d257acfa1f03b02fb07287ce8469 /src/test/checkqueue_tests.cpp
parent    929fd7276c0f0c30b9416f61a6f5f35d763d81e4 (diff)
Expose more parallelism with relaxed atomics (suggested in #9938). Fix a test to check the exclusive or of two properties rather than just the inclusive or.
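
As background, a minimal sketch (hypothetical names, not part of the patch) of why relaxed ordering is sufficient here: the test counters only need atomicity, not inter-thread ordering, because they are read after the worker threads have been joined, and joining already synchronizes with everything the workers did. The default sequentially consistent operator++ would impose full memory barriers for no benefit:

    #include <atomic>
    #include <cstddef>
    #include <thread>
    #include <vector>

    std::atomic<std::size_t> n_calls{0};

    int main()
    {
        std::vector<std::thread> workers;
        for (int i = 0; i < 4; ++i)
            workers.emplace_back([] {
                for (int j = 0; j < 1000; ++j)
                    // Atomic increment, but no ordering constraint on other memory.
                    n_calls.fetch_add(1, std::memory_order_relaxed);
            });
        // join() orders all worker writes before the read below.
        for (auto& t : workers) t.join();
        return n_calls.load(std::memory_order_relaxed) == 4000 ? 0 : 1;
    }

The same reasoning applies to the boolean fix in the patch below: Wait() should succeed exactly when no check failed, so the test wants the exclusive or of r and end_fails, which for two bools is r != end_fails rather than r || end_fails.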
Diffstat (limited to 'src/test/checkqueue_tests.cpp')
-rw-r--r--  src/test/checkqueue_tests.cpp  33
1 file changed, 18 insertions(+), 15 deletions(-)
diff --git a/src/test/checkqueue_tests.cpp b/src/test/checkqueue_tests.cpp
index 6ae0bcadd0..c4564b45b0 100644
--- a/src/test/checkqueue_tests.cpp
+++ b/src/test/checkqueue_tests.cpp
@@ -38,7 +38,7 @@ struct FakeCheckCheckCompletion {
static std::atomic<size_t> n_calls;
bool operator()()
{
- ++n_calls;
+ n_calls.fetch_add(1, std::memory_order_relaxed);
return true;
}
void swap(FakeCheckCheckCompletion& x){};
@@ -88,15 +88,15 @@ struct MemoryCheck {
//
// Really, copy constructor should be deletable, but CCheckQueue breaks
// if it is deleted because of internal push_back.
- fake_allocated_memory += b;
+ fake_allocated_memory.fetch_add(b, std::memory_order_relaxed);
};
MemoryCheck(bool b_) : b(b_)
{
- fake_allocated_memory += b;
+ fake_allocated_memory.fetch_add(b, std::memory_order_relaxed);
};
- ~MemoryCheck(){
- fake_allocated_memory -= b;
-
+ ~MemoryCheck()
+ {
+ fake_allocated_memory.fetch_sub(b, std::memory_order_relaxed);
};
void swap(MemoryCheck& x) { std::swap(b, x.b); };
};
@@ -117,9 +117,9 @@ struct FrozenCleanupCheck {
{
if (should_freeze) {
std::unique_lock<std::mutex> l(m);
- nFrozen = 1;
+ nFrozen.store(1, std::memory_order_relaxed);
cv.notify_one();
- cv.wait(l, []{ return nFrozen == 0;});
+ cv.wait(l, []{ return nFrozen.load(std::memory_order_relaxed) == 0;});
}
}
void swap(FrozenCleanupCheck& x){std::swap(should_freeze, x.should_freeze);};
@@ -262,7 +262,7 @@ BOOST_AUTO_TEST_CASE(test_CheckQueue_Recovers_From_Failure)
control.Add(vChecks);
}
bool r =control.Wait();
- BOOST_REQUIRE(r || end_fails);
+ BOOST_REQUIRE(r != end_fails);
}
}
tg.interrupt_all();
@@ -337,7 +337,7 @@ BOOST_AUTO_TEST_CASE(test_CheckQueue_Memory)
tg.join_all();
}
-// Test that a new verification cannot occur until all checks
+// Test that a new verification cannot occur until all checks
// have been destructed
BOOST_AUTO_TEST_CASE(test_CheckQueue_FrozenCleanup)
{
@@ -361,11 +361,14 @@ BOOST_AUTO_TEST_CASE(test_CheckQueue_FrozenCleanup)
std::unique_lock<std::mutex> l(FrozenCleanupCheck::m);
// Wait until the queue has finished all jobs and frozen
FrozenCleanupCheck::cv.wait(l, [](){return FrozenCleanupCheck::nFrozen == 1;});
- // Try to get control of the queue a bunch of times
- for (auto x = 0; x < 100 && !fails; ++x) {
- fails = queue->ControlMutex.try_lock();
- }
- // Unfreeze
+ }
+ // Try to get control of the queue a bunch of times
+ for (auto x = 0; x < 100 && !fails; ++x) {
+ fails = queue->ControlMutex.try_lock();
+ }
+ {
+ // Unfreeze (we need the lock in case of spurious wakeup)
+ std::unique_lock<std::mutex> l(FrozenCleanupCheck::m);
FrozenCleanupCheck::nFrozen = 0;
}
// Awaken frozen destructor
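
For reference, the locking discipline the last hunk adds is the standard condition-variable protocol: a waiter can wake spuriously and immediately re-check its predicate, so the state the predicate reads must be updated under the same mutex, while the try_lock probing can safely happen outside it. A minimal self-contained sketch (hypothetical names, not the test itself):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    std::mutex m;
    std::condition_variable cv;
    std::atomic<int> nFrozen{0};

    void worker()
    {
        std::unique_lock<std::mutex> l(m);
        nFrozen.store(1, std::memory_order_relaxed);
        cv.notify_one();
        // May wake spuriously; the predicate is re-checked under the lock.
        cv.wait(l, [] { return nFrozen.load(std::memory_order_relaxed) == 0; });
    }

    int main()
    {
        std::thread t(worker);
        {
            std::unique_lock<std::mutex> l(m);
            cv.wait(l, [] { return nFrozen.load(std::memory_order_relaxed) == 1; });
        }
        // ... work that must not hold the mutex goes here ...
        {
            // Unfreeze under the mutex: a spuriously woken worker re-reads
            // nFrozen while holding m, so the store must not race with that.
            std::unique_lock<std::mutex> l(m);
            nFrozen.store(0, std::memory_order_relaxed);
        }
        cv.notify_one();
        t.join();
        return 0;
    }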