 src/sync.cpp | 55
 src/sync.h   | 33
 2 files changed, 65 insertions(+), 23 deletions(-)
diff --git a/src/sync.cpp b/src/sync.cpp
index 8df8ae43f4..641ed2c8ca 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -56,11 +56,24 @@ private:
};
typedef std::vector<std::pair<void*, CLockLocation> > LockStack;
+typedef std::map<std::pair<void*, void*>, LockStack> LockOrders;
+typedef std::set<std::pair<void*, void*> > InvLockOrders;
-static boost::mutex dd_mutex;
-static std::map<std::pair<void*, void*>, LockStack> lockorders;
-static boost::thread_specific_ptr<LockStack> lockstack;
+struct LockData {
+ // Very ugly hack: as the global constructors and destructors run single
+ // threaded, we use this boolean to know whether LockData still exists,
+ // as DeleteLock can get called by global CCriticalSection destructors
+ // after LockData disappears.
+ bool available;
+ LockData() : available(true) {}
+ ~LockData() { available = false; }
+ LockOrders lockorders;
+ InvLockOrders invlockorders;
+ boost::mutex dd_mutex;
+} static lockdata;
+
+boost::thread_specific_ptr<LockStack> lockstack;
static void potential_deadlock_detected(const std::pair<void*, void*>& mismatch, const LockStack& s1, const LockStack& s2)
{
@@ -117,7 +130,7 @@ static void push_lock(void* c, const CLockLocation& locklocation, bool fTry)
if (lockstack.get() == NULL)
lockstack.reset(new LockStack);
- dd_mutex.lock();
+ boost::unique_lock<boost::mutex> lock(lockdata.dd_mutex);
(*lockstack).push_back(std::make_pair(c, locklocation));
@@ -127,23 +140,21 @@ static void push_lock(void* c, const CLockLocation& locklocation, bool fTry)
break;
std::pair<void*, void*> p1 = std::make_pair(i.first, c);
- if (lockorders.count(p1))
+ if (lockdata.lockorders.count(p1))
continue;
- lockorders[p1] = (*lockstack);
+ lockdata.lockorders[p1] = (*lockstack);
std::pair<void*, void*> p2 = std::make_pair(c, i.first);
- if (lockorders.count(p2))
- potential_deadlock_detected(p1, lockorders[p2], lockorders[p1]);
+ lockdata.invlockorders.insert(p2);
+ if (lockdata.lockorders.count(p2))
+ potential_deadlock_detected(p1, lockdata.lockorders[p2], lockdata.lockorders[p1]);
}
}
- dd_mutex.unlock();
}
static void pop_lock()
{
- dd_mutex.lock();
(*lockstack).pop_back();
- dd_mutex.unlock();
}
void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry)
@@ -173,4 +184,26 @@ void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine,
abort();
}
+void DeleteLock(void* cs)
+{
+ if (!lockdata.available) {
+ // We're already shutting down.
+ return;
+ }
+ boost::unique_lock<boost::mutex> lock(lockdata.dd_mutex);
+ std::pair<void*, void*> item = std::make_pair(cs, (void*)0);
+ LockOrders::iterator it = lockdata.lockorders.lower_bound(item);
+ while (it != lockdata.lockorders.end() && it->first.first == cs) {
+ std::pair<void*, void*> invitem = std::make_pair(it->first.second, it->first.first);
+ lockdata.invlockorders.erase(invitem);
+ lockdata.lockorders.erase(it++);
+ }
+ InvLockOrders::iterator invit = lockdata.invlockorders.lower_bound(item);
+ while (invit != lockdata.invlockorders.end() && invit->first == cs) {
+ std::pair<void*, void*> invinvitem = std::make_pair(invit->second, invit->first);
+ lockdata.lockorders.erase(invinvitem);
+ lockdata.invlockorders.erase(invit++);
+ }
+}
+
#endif /* DEBUG_LOCKORDER */
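
The "available" flag in LockData above leans on the fact that global constructors and destructors run single-threaded, while their order across translation units is unspecified. A standalone sketch of the same pattern follows; Registry and Client are hypothetical names for illustration only, not part of this patch.

// Hypothetical illustration of the LockData.available pattern. A Client
// global defined in another translation unit may be destroyed after
// 'registry'. Because 'available' is a trivially destructible bool, reading
// it after ~Registry() has run still yields false, so such a Client can
// skip deregistration instead of touching a registry that no longer exists
// (the same "very ugly hack" the patch comment describes).
#include <cstdio>

static struct Registry {
    bool available;
    Registry() : available(true) {}
    ~Registry() { available = false; }
} registry;

struct Client {
    ~Client()
    {
        if (!registry.available)
            return; // registry already gone; skip cleanup
        std::printf("deregistering client\n");
    }
};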
diff --git a/src/sync.h b/src/sync.h
index 34dd8c228e..0c58fb6b4e 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -71,30 +71,39 @@ public:
}
};
-/**
- * Wrapped boost mutex: supports recursive locking, but no waiting
- * TODO: We should move away from using the recursive lock by default.
- */
-typedef AnnotatedMixin<boost::recursive_mutex> CCriticalSection;
-
-/** Wrapped boost mutex: supports waiting but not recursive locking */
-typedef AnnotatedMixin<boost::mutex> CWaitableCriticalSection;
-
-/** Just a typedef for boost::condition_variable, can be wrapped later if desired */
-typedef boost::condition_variable CConditionVariable;
-
#ifdef DEBUG_LOCKORDER
void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false);
void LeaveCritical();
std::string LocksHeld();
void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
+void DeleteLock(void* cs);
#else
void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
void static inline LeaveCritical() {}
void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
+void static inline DeleteLock(void* cs) {}
#endif
#define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
+/**
+ * Wrapped boost mutex: supports recursive locking, but no waiting
+ * TODO: We should move away from using the recursive lock by default.
+ */
+class CCriticalSection : public AnnotatedMixin<boost::recursive_mutex>
+{
+public:
+ ~CCriticalSection() {
+ DeleteLock((void*)this);
+ }
+};
+
+typedef CCriticalSection CDynamicCriticalSection;
+/** Wrapped boost mutex: supports waiting but not recursive locking */
+typedef AnnotatedMixin<boost::mutex> CWaitableCriticalSection;
+
+/** Just a typedef for boost::condition_variable, can be wrapped later if desired */
+typedef boost::condition_variable CConditionVariable;
+
#ifdef DEBUG_LOCKCONTENTION
void PrintLockContention(const char* pszName, const char* pszFile, int nLine);
#endif
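
A minimal usage sketch of the new DeleteLock() path, assuming a DEBUG_LOCKORDER build and the existing LOCK2() macro from sync.h; the function and cs_outer below are hypothetical driver code, not part of the patch.

#include "sync.h"

static CCriticalSection cs_outer; // long-lived lock, never deleted

void UseDynamicLock()
{
    CDynamicCriticalSection* pcs = new CDynamicCriticalSection();

    {
        // Taking both locks records the order (cs_outer, *pcs) in
        // lockdata.lockorders and the inverse pair in lockdata.invlockorders.
        LOCK2(cs_outer, *pcs);
    }

    // ~CCriticalSection() calls DeleteLock(this), which erases every
    // lockorders/invlockorders entry mentioning this address. Without that
    // cleanup, a future mutex allocated at the same address could appear to
    // invert the recorded order and trigger a spurious
    // potential_deadlock_detected() report.
    delete pcs;
}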