author     MarcoFalke <falke.marco@gmail.com>    2020-08-04 17:00:08 +0200
committer  MarcoFalke <falke.marco@gmail.com>    2020-08-04 17:00:15 +0200
commit     0f16212c5931b30f430014caa485de53f9a14920 (patch)
tree       e45f8e64223c529d01b226d4560771a27fadb71d /src
parent     3c93623be20d534bb653410db03d38152135e2e6 (diff)
parent     63e9e40b73507b0c9361fc8728d4e97fd72c9ec9 (diff)
Merge #19340: Preserve the `LockData` initial state if "potential deadlock detected" exception thrown
63e9e40b73507b0c9361fc8728d4e97fd72c9ec9 test: Add LockStackEmpty() (Hennadii Stepanov)
42b2a953735457cbf7b62a18b89811a66e573298 test: Repeat deadlock tests (Hennadii Stepanov)
1f96be25b020a56afa330286ee4f241aa14d3983 Preserve initial state if push_lock() throws exception (Hennadii Stepanov)

Pull request description:

  On master (e3fa3c7d671e34038a262bb2db16b30bee82153d), if `push_lock()` throws the "potential deadlock detected" exception (via the `potential_deadlock_detected()` call), the internal state of the `LockData` instance differs from its state at the time `push_lock()` was called. This misbehaviour makes testing brittle, at the very least.

  This PR preserves the initial state of the `LockData` instance if `push_lock()` throws an exception, and improves the `sync_tests` unit test.

ACKs for top commit:
  MarcoFalke:
    re-ACK 63e9e40b73
  vasild:
    ACK 63e9e40

Tree-SHA512: 7679182154ce5f079b44b790faf76eb5f553328dea70a326ff6b600db70e2f9ae015a33a104ca070cb660318280cb79b6b42e37ea5166f26f9e627ba721fcdec
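
The fix boils down to reordering push_lock(): look for the inverse lock pair first, and only mutate the shared lock-order map once the check can no longer raise an exception. The following is a minimal, self-contained sketch of that ordering; the names PushLockSketch and LockStack and the plain std::logic_error are simplifications for illustration, not the actual Bitcoin Core code (which appears in the src/sync.cpp hunk below).

    #include <map>
    #include <stdexcept>
    #include <string>
    #include <utility>
    #include <vector>

    using LockPair = std::pair<void*, void*>;
    using LockStack = std::vector<void*>;

    // Record that every lock already held by this thread is ordered before
    // new_lock. If the inverse ordering is already known, report a potential
    // deadlock *before* touching the shared map, so a caller that catches the
    // exception still observes the state as it was on entry.
    void PushLockSketch(std::map<LockPair, LockStack>& lockorders,
                        LockStack& lock_stack, void* new_lock)
    {
        lock_stack.push_back(new_lock);
        for (void* held : lock_stack) {
            if (held == new_lock) break; // reached the lock we just pushed

            const LockPair p1 = std::make_pair(held, new_lock);
            if (lockorders.count(p1)) continue; // ordering already recorded

            const LockPair p2 = std::make_pair(new_lock, held);
            if (lockorders.count(p2)) {
                // Undo this thread's push and throw before any shared mutation.
                const LockStack stack_copy = lock_stack; // full stack for the report
                lock_stack.pop_back();
                throw std::logic_error("potential deadlock detected (" +
                                       std::to_string(stack_copy.size()) + " locks involved)");
            }
            lockorders.emplace(p1, lock_stack); // shared map is modified only after the check passed
        }
    }

The design choice mirrors the strong exception-safety guarantee: perform all checks that can fail, then commit the changes, so an observer never sees a half-updated ordering table.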
Diffstat (limited to 'src')
-rw-r--r--  src/sync.cpp             22
-rw-r--r--  src/sync.h               14
-rw-r--r--  src/test/sync_tests.cpp   6
3 files changed, 33 insertions, 9 deletions
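
Before the raw diff, here is a condensed restatement of how the new LockStackEmpty() helper is exercised by the updated test (see the sync_tests.cpp hunk below). It assumes DEBUG_LOCKORDER is defined and g_debug_lockorder_abort has been set to false, as the surrounding test case does; the function name DeadlockLeavesCleanState is illustrative, not from the tree.

    #include <boost/test/unit_test.hpp>
    #include <stdexcept>
    #include <sync.h>

    // The per-thread lock stack must be empty both before and after the
    // "potential deadlock detected" exception, i.e. the exception must not
    // leak lock-tracking state.
    static void DeadlockLeavesCleanState(Mutex& mutex1, Mutex& mutex2)
    {
        {
            LOCK2(mutex1, mutex2); // record the ordering mutex1 -> mutex2
        }
        BOOST_CHECK(LockStackEmpty());
        try {
            LOCK2(mutex2, mutex1); // inverse ordering: throws under DEBUG_LOCKORDER
        } catch (const std::logic_error&) {
        }
        BOOST_CHECK(LockStackEmpty());
    }

Running such a helper twice on the same pair of mutexes is exactly what the new test repetition below checks: the second pass only passes if the first exception left LockData untouched.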
diff --git a/src/sync.cpp b/src/sync.cpp
index 10f0483189..4be13a3c48 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -149,12 +149,17 @@ static void push_lock(void* c, const CLockLocation& locklocation)
const LockPair p1 = std::make_pair(i.first, c);
if (lockdata.lockorders.count(p1))
continue;
- lockdata.lockorders.emplace(p1, lock_stack);
const LockPair p2 = std::make_pair(c, i.first);
+ if (lockdata.lockorders.count(p2)) {
+ auto lock_stack_copy = lock_stack;
+ lock_stack.pop_back();
+ potential_deadlock_detected(p1, lockdata.lockorders[p2], lock_stack_copy);
+ // potential_deadlock_detected() does not return.
+ }
+
+ lockdata.lockorders.emplace(p1, lock_stack);
lockdata.invlockorders.insert(p2);
- if (lockdata.lockorders.count(p2))
- potential_deadlock_detected(p1, lockdata.lockorders[p2], lockdata.lockorders[p1]);
}
}
@@ -259,6 +264,17 @@ void DeleteLock(void* cs)
}
}
+bool LockStackEmpty()
+{
+ LockData& lockdata = GetLockData();
+ std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+ const auto it = lockdata.m_lock_stacks.find(std::this_thread::get_id());
+ if (it == lockdata.m_lock_stacks.end()) {
+ return true;
+ }
+ return it->second.empty();
+}
+
bool g_debug_lockorder_abort = true;
#endif /* DEBUG_LOCKORDER */
diff --git a/src/sync.h b/src/sync.h
index 77327d8bfe..05ff2ee8a9 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -56,6 +56,7 @@ template <typename MutexType>
void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs);
void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
void DeleteLock(void* cs);
+bool LockStackEmpty();
/**
* Call abort() if a potential lock order deadlock bug is detected, instead of
@@ -64,13 +65,14 @@ void DeleteLock(void* cs);
*/
extern bool g_debug_lockorder_abort;
#else
-void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
-void static inline LeaveCritical() {}
-void static inline CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {}
+inline void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
+inline void LeaveCritical() {}
+inline void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {}
template <typename MutexType>
-void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {}
-void static inline AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
-void static inline DeleteLock(void* cs) {}
+inline void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {}
+inline void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
+inline void DeleteLock(void* cs) {}
+inline bool LockStackEmpty() { return true; }
#endif
#define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
#define AssertLockNotHeld(cs) AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs)
diff --git a/src/test/sync_tests.cpp b/src/test/sync_tests.cpp
index 3ea8714f3a..19029ebd3c 100644
--- a/src/test/sync_tests.cpp
+++ b/src/test/sync_tests.cpp
@@ -14,6 +14,7 @@ void TestPotentialDeadLockDetected(MutexType& mutex1, MutexType& mutex2)
{
LOCK2(mutex1, mutex2);
}
+ BOOST_CHECK(LockStackEmpty());
bool error_thrown = false;
try {
LOCK2(mutex2, mutex1);
@@ -21,6 +22,7 @@ void TestPotentialDeadLockDetected(MutexType& mutex1, MutexType& mutex2)
BOOST_CHECK_EQUAL(e.what(), "potential deadlock detected: mutex1 -> mutex2 -> mutex1");
error_thrown = true;
}
+ BOOST_CHECK(LockStackEmpty());
#ifdef DEBUG_LOCKORDER
BOOST_CHECK(error_thrown);
#else
@@ -40,9 +42,13 @@ BOOST_AUTO_TEST_CASE(potential_deadlock_detected)
RecursiveMutex rmutex1, rmutex2;
TestPotentialDeadLockDetected(rmutex1, rmutex2);
+ // The second test ensures that the lock tracking data have not been broken by the exception.
+ TestPotentialDeadLockDetected(rmutex1, rmutex2);
Mutex mutex1, mutex2;
TestPotentialDeadLockDetected(mutex1, mutex2);
+ // The second test ensures that the lock tracking data have not been broken by the exception.
+ TestPotentialDeadLockDetected(mutex1, mutex2);
#ifdef DEBUG_LOCKORDER
g_debug_lockorder_abort = prev;