author     MarcoFalke <falke.marco@gmail.com>   2020-05-26 08:14:39 -0400
committer  MarcoFalke <falke.marco@gmail.com>   2020-05-26 08:14:57 -0400
commit     fe1357a03af108c41baa6bd31903f2cfb0d75ef5
tree       79395357aae57300859d1c25416a666a9a33068e
parent     13397dc78fd6a6a57bdc91db66c51788603267bb
parent     90eb027204f5a9d7c00fa97d4112243bd37a9012
Merge #18881: Prevent UB in DeleteLock() function
90eb027204f5a9d7c00fa97d4112243bd37a9012 doc: Add and fix comments about never destroyed objects (Hennadii Stepanov)
26c093a9957756f3743c2347fe0abd90f81159c4 Replace thread_local g_lockstack with a mutex-protected map (Hennadii Stepanov)
58e6881bc5be002e8ddbc9b75422c0deae66a2df refactor: Refactor duplicated code into LockHeld() (Hennadii Stepanov)
f511f61dda4e860079153d5e51d64658cc265283 refactor: Add LockPair type alias (Hennadii Stepanov)
8d8921abd35c3ac1b8ebacb11de8e1bbc7b28d66 refactor: Add LockStackItem type alias (Hennadii Stepanov)
458992b06d80eb568141f60a33d38e12e894e27a Prevent UB in DeleteLock() function (Hennadii Stepanov)
Pull request description:
Tracking our instrumented mutexes (`Mutex` and `RecursiveMutex` types) requires that none of the involved objects be destroyed until after their last use. On master (ec79b5f86b22ad8f77c736f9bb76c2e4d7faeaa4) we have two problems related to object destruction order:
- the function-local `static` `lockdata` object that is destroyed at [program exit](https://en.cppreference.com/w/cpp/utility/program/exit)
- the `thread_local` `g_lockstack` that is destroyed at [thread exit](https://en.cppreference.com/w/cpp/language/destructor)
Both cases could cause UB at program exit insofar as mutexes are still used in the destructors of other static objects.
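To make the first hazard concrete, here is a minimal standalone sketch (illustrative `Tracker`/`Global` names, not code from this repository) of a function-local static being destroyed before a static object whose destructor still calls into it:

    #include <cstdio>

    struct Tracker {
        ~Tracker() { std::puts("~Tracker"); }
        void Remove(void*) { std::puts("Tracker::Remove"); }
    };

    Tracker& GetTracker()
    {
        static Tracker tracker; // destroyed at program exit
        return tracker;
    }

    struct Global {
        Global() { std::puts("Global constructed"); }
        // May run after ~Tracker() has already executed: UB, because control
        // passes through the definition of a destroyed block-scope static.
        ~Global() { GetTracker().Remove(this); }
    };

    Global g_global; // dynamically initialized before main()

    int main()
    {
        // `tracker` finishes construction here, *after* g_global, so it is
        // destroyed *before* g_global when the program exits.
        GetTracker();
    }

Here `GetTracker()` plays the role of `GetLockData()` and `g_global` that of a static `RecursiveMutex` whose destructor calls `DeleteLock()`; constructing the singleton with `*new` and never destroying it removes the dangling call entirely.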
Fix #18824
ACKs for top commit:
MarcoFalke:
re-ACK 90eb027204, only change is new doc commit 👍
ryanofsky:
Code review ACK 90eb027204f5a9d7c00fa97d4112243bd37a9012 because all the changes look correct and safe. But I don't know the purpose of commit 26c093a9957756f3743c2347fe0abd90f81159c4 "Replace thread_local g_lockstack with a mutex-protected map (5/6)." It seems like it could have a bad impact on debug performance, and the commit message and PR description don't give a reason for the change.
Tree-SHA512: 99f29157fd1278994e3f6eebccedfd9dae540450f5f8b980518345a89d56b635f943a85b20864cef087027fd0fcdb4880b659ef59bfe5626d110452ae22031c6
 src/logging.cpp |   4
 src/sync.cpp    | 121
 2 files changed, 75 insertions(+), 50 deletions(-)
diff --git a/src/logging.cpp b/src/logging.cpp
index eb9da06d9b..56c44ae1ea 100644
--- a/src/logging.cpp
+++ b/src/logging.cpp
@@ -22,8 +22,8 @@ BCLog::Logger& LogInstance()
  * access the logger. When the shutdown sequence is fully audited and tested,
  * explicit destruction of these objects can be implemented by changing this
  * from a raw pointer to a std::unique_ptr.
- * Since the destructor is never called, the logger and all its members must
- * have a trivial destructor.
+ * Since the ~Logger() destructor is never called, the Logger class and all
+ * its subclasses must have implicitly-defined destructors.
  *
  * This method of initialization was originally introduced in
  * ee3374234c60aba2cc4c5cd5cac1c0aefc2d817c.
diff --git a/src/sync.cpp b/src/sync.cpp
index b86c57e498..c3312b5a00 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -7,15 +7,19 @@
 #endif
 
 #include <sync.h>
 
-#include <tinyformat.h>
 #include <logging.h>
+#include <tinyformat.h>
 #include <util/strencodings.h>
 #include <util/threadnames.h>
 
 #include <map>
 #include <set>
 #include <system_error>
+#include <thread>
+#include <unordered_map>
+#include <utility>
+#include <vector>
 
 #ifdef DEBUG_LOCKCONTENTION
 #if !defined(HAVE_THREAD_LOCAL)
@@ -73,35 +77,35 @@ private:
     int sourceLine;
 };
 
-typedef std::vector<std::pair<void*, CLockLocation> > LockStack;
-typedef std::map<std::pair<void*, void*>, LockStack> LockOrders;
-typedef std::set<std::pair<void*, void*> > InvLockOrders;
+using LockStackItem = std::pair<void*, CLockLocation>;
+using LockStack = std::vector<LockStackItem>;
+using LockStacks = std::unordered_map<std::thread::id, LockStack>;
 
-struct LockData {
-    // Very ugly hack: as the global constructs and destructors run single
-    // threaded, we use this boolean to know whether LockData still exists,
-    // as DeleteLock can get called by global RecursiveMutex destructors
-    // after LockData disappears.
-    bool available;
-    LockData() : available(true) {}
-    ~LockData() { available = false; }
+using LockPair = std::pair<void*, void*>;
+using LockOrders = std::map<LockPair, LockStack>;
+using InvLockOrders = std::set<LockPair>;
 
+struct LockData {
+    LockStacks m_lock_stacks;
     LockOrders lockorders;
     InvLockOrders invlockorders;
    std::mutex dd_mutex;
 };
+
 LockData& GetLockData() {
-    static LockData lockdata;
-    return lockdata;
+    // This approach guarantees that the object is not destroyed until after its last use.
+    // The operating system automatically reclaims all the memory in a program's heap when that program exits.
+    // Since the ~LockData() destructor is never called, the LockData class and all
+    // its subclasses must have implicitly-defined destructors.
+    static LockData& lock_data = *new LockData();
+    return lock_data;
 }
 
-static thread_local LockStack g_lockstack;
-
-static void potential_deadlock_detected(const std::pair<void*, void*>& mismatch, const LockStack& s1, const LockStack& s2)
+static void potential_deadlock_detected(const LockPair& mismatch, const LockStack& s1, const LockStack& s2)
 {
     LogPrintf("POTENTIAL DEADLOCK DETECTED\n");
     LogPrintf("Previous lock order was:\n");
-    for (const std::pair<void*, CLockLocation> & i : s2) {
+    for (const LockStackItem& i : s2) {
         if (i.first == mismatch.first) {
             LogPrintf(" (1)"); /* Continued */
         }
@@ -111,7 +115,7 @@ static void potential_deadlock_detected(const std::pair<void*, void*>& mismatch,
         LogPrintf(" %s\n", i.second.ToString());
     }
     LogPrintf("Current lock order is:\n");
-    for (const std::pair<void*, CLockLocation> & i : s1) {
+    for (const LockStackItem& i : s1) {
         if (i.first == mismatch.first) {
             LogPrintf(" (1)"); /* Continued */
         }
@@ -132,18 +136,18 @@ static void push_lock(void* c, const CLockLocation& locklocation)
     LockData& lockdata = GetLockData();
     std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
 
-    g_lockstack.push_back(std::make_pair(c, locklocation));
-
-    for (const std::pair<void*, CLockLocation>& i : g_lockstack) {
+    LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
+    lock_stack.emplace_back(c, locklocation);
+    for (const LockStackItem& i : lock_stack) {
         if (i.first == c)
             break;
 
-        std::pair<void*, void*> p1 = std::make_pair(i.first, c);
+        const LockPair p1 = std::make_pair(i.first, c);
         if (lockdata.lockorders.count(p1))
             continue;
-        lockdata.lockorders.emplace(p1, g_lockstack);
+        lockdata.lockorders.emplace(p1, lock_stack);
 
-        std::pair<void*, void*> p2 = std::make_pair(c, i.first);
+        const LockPair p2 = std::make_pair(c, i.first);
         lockdata.invlockorders.insert(p2);
         if (lockdata.lockorders.count(p2))
             potential_deadlock_detected(p1, lockdata.lockorders[p2], lockdata.lockorders[p1]);
@@ -152,7 +156,14 @@ static void push_lock(void* c, const CLockLocation& locklocation)
 
 static void pop_lock()
 {
-    g_lockstack.pop_back();
+    LockData& lockdata = GetLockData();
+    std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+
+    LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
+    lock_stack.pop_back();
+    if (lock_stack.empty()) {
+        lockdata.m_lock_stacks.erase(std::this_thread::get_id());
+    }
 }
 
 void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry)
@@ -162,11 +173,17 @@ void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs
 
 void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line)
 {
-    if (!g_lockstack.empty()) {
-        const auto& lastlock = g_lockstack.back();
-        if (lastlock.first == cs) {
-            lockname = lastlock.second.Name();
-            return;
+    {
+        LockData& lockdata = GetLockData();
+        std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+
+        const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
+        if (!lock_stack.empty()) {
+            const auto& lastlock = lock_stack.back();
+            if (lastlock.first == cs) {
+                lockname = lastlock.second.Name();
+                return;
+            }
         }
     }
     throw std::system_error(EPERM, std::generic_category(), strprintf("%s:%s %s was not most recent critical section locked", file, line, guardname));
@@ -179,49 +196,57 @@ void LeaveCritical()
 
 std::string LocksHeld()
 {
+    LockData& lockdata = GetLockData();
+    std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+
+    const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
     std::string result;
-    for (const std::pair<void*, CLockLocation>& i : g_lockstack)
+    for (const LockStackItem& i : lock_stack)
         result += i.second.ToString() + std::string("\n");
     return result;
 }
 
+static bool LockHeld(void* mutex)
+{
+    LockData& lockdata = GetLockData();
+    std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+
+    const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
+    for (const LockStackItem& i : lock_stack) {
+        if (i.first == mutex) return true;
+    }
+
+    return false;
+}
+
 void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs)
 {
-    for (const std::pair<void*, CLockLocation>& i : g_lockstack)
-        if (i.first == cs)
-            return;
+    if (LockHeld(cs)) return;
     tfm::format(std::cerr, "Assertion failed: lock %s not held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld());
     abort();
 }
 
 void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs)
 {
-    for (const std::pair<void*, CLockLocation>& i : g_lockstack) {
-        if (i.first == cs) {
-            tfm::format(std::cerr, "Assertion failed: lock %s held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld());
-            abort();
-        }
-    }
+    if (!LockHeld(cs)) return;
+    tfm::format(std::cerr, "Assertion failed: lock %s held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld());
+    abort();
}
 
 void DeleteLock(void* cs)
 {
     LockData& lockdata = GetLockData();
-    if (!lockdata.available) {
-        // We're already shutting down.
-        return;
-    }
     std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
-    std::pair<void*, void*> item = std::make_pair(cs, nullptr);
+    const LockPair item = std::make_pair(cs, nullptr);
     LockOrders::iterator it = lockdata.lockorders.lower_bound(item);
     while (it != lockdata.lockorders.end() && it->first.first == cs) {
-        std::pair<void*, void*> invitem = std::make_pair(it->first.second, it->first.first);
+        const LockPair invitem = std::make_pair(it->first.second, it->first.first);
         lockdata.invlockorders.erase(invitem);
         lockdata.lockorders.erase(it++);
     }
     InvLockOrders::iterator invit = lockdata.invlockorders.lower_bound(item);
     while (invit != lockdata.invlockorders.end() && invit->first == cs) {
-        std::pair<void*, void*> invinvitem = std::make_pair(invit->second, invit->first);
+        const LockPair invinvitem = std::make_pair(invit->second, invit->first);
         lockdata.lockorders.erase(invinvitem);
         lockdata.invlockorders.erase(invit++);
     }
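
For reviewers who want the new shape of the code in isolation, here is a minimal standalone sketch (hypothetical `Registry`/`Push`/`Pop` names, not code from the diff) of the two techniques this PR combines: the deliberately leaked function-local singleton and a mutex-protected map keyed by `std::thread::id` in place of a `thread_local` stack:

    #include <mutex>
    #include <thread>
    #include <unordered_map>
    #include <vector>

    struct Registry {
        std::mutex m;
        std::unordered_map<std::thread::id, std::vector<void*>> stacks;
    };

    Registry& GetRegistry()
    {
        // Leaked on purpose: never destroyed, so it remains valid for static
        // destructors that run at program exit; the OS reclaims the memory.
        static Registry& reg = *new Registry();
        return reg;
    }

    void Push(void* lock)
    {
        Registry& reg = GetRegistry();
        std::lock_guard<std::mutex> guard(reg.m);
        reg.stacks[std::this_thread::get_id()].push_back(lock);
    }

    void Pop()
    {
        Registry& reg = GetRegistry();
        std::lock_guard<std::mutex> guard(reg.m);
        auto& stack = reg.stacks[std::this_thread::get_id()];
        stack.pop_back();
        // Erase the entry once empty so exited threads leave nothing behind;
        // the thread_local destructor used to perform this cleanup implicitly.
        if (stack.empty()) reg.stacks.erase(std::this_thread::get_id());
    }

    int main()
    {
        int dummy;
        Push(&dummy); // record that this thread holds `dummy`
        Pop();        // release it; this thread's entry is erased again
    }

The trade-off is one mutex acquisition plus one hash lookup per lock and unlock in debug builds, which is the performance impact ryanofsky's review asks about.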