From: Ed Schouten
Date: Wed, 3 Sep 2014 06:00:11 +0000 (+0000)
Subject: Allow a scoped lockable object to acquire/release multiple locks.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=8291bead636f3f3f032bd5dcee2d7f9dfd546232;p=clang

Allow a scoped lockable object to acquire/release multiple locks.

Scoped lockable objects (mutex guards) are implemented as if they were
locks themselves, acquired upon construction and released upon
destruction. As a guard of course needs to lock down something else (a
mutex), it keeps track of that mutex through the underlying mutex field
in its FactEntry.

The problem with this approach is that it only allows us to lock down a
single mutex, so extend the code to use a vector of underlying mutexes.
This, however, makes the code a bit more complex than necessary, so
subclass FactEntry into LockableFactEntry and ScopedLockableFactEntry
and move all the logic that differs between regular locks and scoped
lockables into member functions.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@217016 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Analysis/ThreadSafety.cpp b/lib/Analysis/ThreadSafety.cpp
index f2746cba8c..008561d5e7 100644
--- a/lib/Analysis/ThreadSafety.cpp
+++ b/lib/Analysis/ThreadSafety.cpp
@@ -85,7 +85,8 @@ public:
   }
 };
 
-
+class FactManager;
+class FactSet;
 
 /// \brief This is a helper class that stores a fact that is known at a
 /// particular point in program execution.  Currently, a fact is a capability,
@@ -99,28 +100,27 @@ class FactEntry : public CapabilityExpr {
 private:
   LockKind LKind;                    ///< exclusive or shared
   SourceLocation AcquireLoc;         ///< where it was acquired.
-  bool Managed;                      ///< for ScopedLockable objects
   bool Asserted;                     ///< true if the lock was asserted
-  const til::SExpr* UnderlyingMutex; ///< for ScopedLockable objects
 
 public:
   FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
-            bool Mng=false, bool Asrt=false)
-      : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Managed(Mng),
-        Asserted(Asrt), UnderlyingMutex(nullptr)
-  {}
+            bool Asrt)
+      : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt) {}
 
-  FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
-            const til::SExpr *Mu)
-      : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Managed(false),
-        Asserted(false), UnderlyingMutex(Mu)
-  {}
+  virtual ~FactEntry() {}
 
   LockKind kind() const { return LKind; }
   SourceLocation loc() const { return AcquireLoc; }
   bool asserted() const { return Asserted; }
-  bool managed() const { return Managed; }
-  const til::SExpr* underlying() const { return UnderlyingMutex; }
+
+  virtual void
+  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+                                SourceLocation JoinLoc, LockErrorKind LEK,
+                                ThreadSafetyHandler &Handler) const = 0;
+  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
+                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+                            bool FullyRemove, ThreadSafetyHandler &Handler,
+                            StringRef DiagKind) const = 0;
 
   // Return true if LKind >= LK, where exclusive > shared
   bool isAtLeast(LockKind LK) {
@@ -135,16 +135,16 @@ typedef unsigned short FactID;
 /// the analysis of a single routine.
 class FactManager {
 private:
-  std::vector<FactEntry> Facts;
+  std::vector<std::unique_ptr<FactEntry>> Facts;
 
 public:
-  FactID newFact(const FactEntry &Entry) {
-    Facts.push_back(Entry);
+  FactID newFact(std::unique_ptr<FactEntry> Entry) {
+    Facts.push_back(std::move(Entry));
     return static_cast<unsigned short>(Facts.size() - 1);
   }
 
-  const FactEntry& operator[](FactID F) const { return Facts[F]; }
-  FactEntry& operator[](FactID F) { return Facts[F]; }
+  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
+  FactEntry &operator[](FactID F) { return *Facts[F]; }
 };
 
@@ -184,8 +184,8 @@ public:
   void addLockByID(FactID ID) { FactIDs.push_back(ID); }
 
-  FactID addLock(FactManager& FM, const FactEntry &Entry) {
-    FactID F = FM.newFact(Entry);
+  FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
+    FactID F = FM.newFact(std::move(Entry));
     FactIDs.push_back(F);
     return F;
   }
@@ -758,6 +758,98 @@ static void findBlockLocations(CFG *CFGraph,
   }
 }
 
+class LockableFactEntry : public FactEntry {
+private:
+  bool Managed; ///< managed by ScopedLockable object
+
+public:
+  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
+                    bool Mng = false, bool Asrt = false)
+      : FactEntry(CE, LK, Loc, Asrt), Managed(Mng) {}
+
+  void
+  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+                                SourceLocation JoinLoc, LockErrorKind LEK,
+                                ThreadSafetyHandler &Handler) const override {
+    if (!Managed && !asserted() && !negative() && !isUniversal()) {
+      Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
+                                        LEK);
+    }
+  }
+
+  void handleUnlock(FactSet &FSet, FactManager &FactMan,
+                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+                    bool FullyRemove, ThreadSafetyHandler &Handler,
+                    StringRef DiagKind) const override {
+    FSet.removeLock(FactMan, Cp);
+    if (!Cp.negative()) {
+      FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+                                !Cp, LK_Exclusive, UnlockLoc));
+    }
+  }
+};
+
+class ScopedLockableFactEntry : public FactEntry {
+private:
+  SmallVector<const til::SExpr *, 4> UnderlyingMutexes;
+
+public:
+  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
+                          const CapExprSet &Excl, const CapExprSet &Shrd)
+      : FactEntry(CE, LK_Exclusive, Loc, false) {
+    for (const auto &M : Excl)
+      UnderlyingMutexes.push_back(M.sexpr());
+    for (const auto &M : Shrd)
+      UnderlyingMutexes.push_back(M.sexpr());
+  }
+
+  void
+  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+                                SourceLocation JoinLoc, LockErrorKind LEK,
+                                ThreadSafetyHandler &Handler) const override {
+    for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+      if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
+        // If this scoped lock manages another mutex, and if the underlying
+        // mutex is still held, then warn about the underlying mutex.
+        Handler.handleMutexHeldEndOfScope(
+            "mutex", sx::toString(UnderlyingMutex), loc(), JoinLoc, LEK);
+      }
+    }
+  }
+
+  void handleUnlock(FactSet &FSet, FactManager &FactMan,
+                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+                    bool FullyRemove, ThreadSafetyHandler &Handler,
+                    StringRef DiagKind) const override {
+    assert(!Cp.negative() && "Managing object cannot be negative.");
+    for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+      CapabilityExpr UnderCp(UnderlyingMutex, false);
+      auto UnderEntry = llvm::make_unique<LockableFactEntry>(
+          !UnderCp, LK_Exclusive, UnlockLoc);
+
+      if (FullyRemove) {
+        // We're destroying the managing object.
+        // Remove the underlying mutex if it exists; but don't warn.
+        if (FSet.findLock(FactMan, UnderCp)) {
+          FSet.removeLock(FactMan, UnderCp);
+          FSet.addLock(FactMan, std::move(UnderEntry));
+        }
+      } else {
+        // We're releasing the underlying mutex, but not destroying the
+        // managing object. Warn on dual release.
+        if (!FSet.findLock(FactMan, UnderCp)) {
+          Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(),
+                                        UnlockLoc);
+        }
+        FSet.removeLock(FactMan, UnderCp);
+        FSet.addLock(FactMan, std::move(UnderEntry));
+      }
+    }
+    if (FullyRemove)
+      FSet.removeLock(FactMan, Cp);
+  }
+};
+
 /// \brief Class which implements the core thread safety analysis routines.
 class ThreadSafetyAnalyzer {
   friend class BuildLockset;
@@ -778,8 +870,8 @@ public:
 
   bool inCurrentScope(const CapabilityExpr &CapE);
 
-  void addLock(FactSet &FSet, const FactEntry &Entry, StringRef DiagKind,
-               bool ReqAttr = false);
+  void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
+               StringRef DiagKind, bool ReqAttr = false);
   void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                   SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
                   StringRef DiagKind);
@@ -908,32 +1000,33 @@ inline bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
 
 /// \brief Add a new lock to the lockset, warning if the lock is already there.
 /// \param ReqAttr -- true if this is part of an initial Requires attribute.
-void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const FactEntry &Entry,
+void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
+                                   std::unique_ptr<FactEntry> Entry,
                                    StringRef DiagKind, bool ReqAttr) {
-  if (Entry.shouldIgnore())
+  if (Entry->shouldIgnore())
     return;
 
-  if (!ReqAttr && !Entry.negative()) {
+  if (!ReqAttr && !Entry->negative()) {
     // look for the negative capability, and remove it from the fact set.
-    CapabilityExpr NegC = !Entry;
+    CapabilityExpr NegC = !*Entry;
     FactEntry *Nen = FSet.findLock(FactMan, NegC);
     if (Nen) {
       FSet.removeLock(FactMan, NegC);
     } else {
-      if (inCurrentScope(Entry) && !Entry.asserted())
-        Handler.handleNegativeNotHeld(DiagKind, Entry.toString(),
-                                      NegC.toString(), Entry.loc());
+      if (inCurrentScope(*Entry) && !Entry->asserted())
+        Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
+                                      NegC.toString(), Entry->loc());
     }
   }
 
   // FIXME: deal with acquired before/after annotations.
   // FIXME: Don't always warn when we have support for reentrant locks.
-  if (FSet.findLock(FactMan, Entry)) {
-    if (!Entry.asserted())
-      Handler.handleDoubleLock(DiagKind, Entry.toString(), Entry.loc());
+  if (FSet.findLock(FactMan, *Entry)) {
+    if (!Entry->asserted())
+      Handler.handleDoubleLock(DiagKind, Entry->toString(), Entry->loc());
   } else {
-    FSet.addLock(FactMan, Entry);
+    FSet.addLock(FactMan, std::move(Entry));
   }
 }
 
@@ -960,37 +1053,8 @@ void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
                               LDat->kind(), ReceivedKind, UnlockLoc);
   }
 
-  if (LDat->underlying()) {
-    assert(!Cp.negative() && "Managing object cannot be negative.");
-    CapabilityExpr UnderCp(LDat->underlying(), false);
-    FactEntry UnderEntry(!UnderCp, LK_Exclusive, UnlockLoc);
-
-    // This is scoped lockable object, which manages the real mutex.
-    if (FullyRemove) {
-      // We're destroying the managing object.
-      // Remove the underlying mutex if it exists; but don't warn.
-      if (FSet.findLock(FactMan, UnderCp)) {
-        FSet.removeLock(FactMan, UnderCp);
-        FSet.addLock(FactMan, UnderEntry);
-      }
-      FSet.removeLock(FactMan, Cp);
-    } else {
-      // We're releasing the underlying mutex, but not destroying the
-      // managing object. Warn on dual release.
-      if (!FSet.findLock(FactMan, UnderCp)) {
-        Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(), UnlockLoc);
-      }
-      FSet.removeLock(FactMan, UnderCp);
-      FSet.addLock(FactMan, UnderEntry);
-    }
-    return;
-  }
-  // else !LDat->underlying()
-
-  FSet.removeLock(FactMan, Cp);
-  if (!Cp.negative()) {
-    FSet.addLock(FactMan, FactEntry(!Cp, LK_Exclusive, UnlockLoc));
-  }
+  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
+                     DiagKind);
 }
 
@@ -1192,10 +1256,12 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
   // Add and remove locks.
   SourceLocation Loc = Exp->getExprLoc();
   for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
-    addLock(Result, FactEntry(ExclusiveLockToAdd, LK_Exclusive, Loc),
+    addLock(Result, llvm::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
+                                                         LK_Exclusive, Loc),
            CapDiagKind);
   for (const auto &SharedLockToAdd : SharedLocksToAdd)
-    addLock(Result, FactEntry(SharedLockToAdd, LK_Shared, Loc),
+    addLock(Result, llvm::make_unique<LockableFactEntry>(SharedLockToAdd,
+                                                         LK_Shared, Loc),
            CapDiagKind);
 }
 
@@ -1457,8 +1523,9 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
         CapExprSet AssertLocks;
         Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
         for (const auto &AssertLock : AssertLocks)
-          Analyzer->addLock(FSet, FactEntry(AssertLock, LK_Exclusive, Loc,
-                                            false, true),
+          Analyzer->addLock(FSet,
+                            llvm::make_unique<LockableFactEntry>(
+                                AssertLock, LK_Exclusive, Loc, false, true),
                             ClassifyDiagnostic(A));
         break;
       }
@@ -1468,8 +1535,8 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
         CapExprSet AssertLocks;
         Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
         for (const auto &AssertLock : AssertLocks)
-          Analyzer->addLock(FSet, FactEntry(AssertLock, LK_Shared, Loc,
-                                            false, true),
+          Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+                                      AssertLock, LK_Shared, Loc, false, true),
                             ClassifyDiagnostic(A));
         break;
       }
@@ -1523,32 +1590,28 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
 
   // Add locks.
   for (const auto &M : ExclusiveLocksToAdd)
-    Analyzer->addLock(FSet, FactEntry(M, LK_Exclusive, Loc, isScopedVar),
+    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+                                M, LK_Exclusive, Loc, isScopedVar),
                       CapDiagKind);
   for (const auto &M : SharedLocksToAdd)
-    Analyzer->addLock(FSet, FactEntry(M, LK_Shared, Loc, isScopedVar),
+    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+                                M, LK_Shared, Loc, isScopedVar),
                       CapDiagKind);
 
-  // Add the managing object as a dummy mutex, mapped to the underlying mutex.
-  // FIXME: this doesn't work if we acquire multiple locks.
   if (isScopedVar) {
+    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
     SourceLocation MLoc = VD->getLocation();
    DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
    // FIXME: does this store a pointer to DRE?
    CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);
 
-    for (const auto &M : ExclusiveLocksToAdd)
-      Analyzer->addLock(FSet, FactEntry(Scp, LK_Exclusive, MLoc, M.sexpr()),
-                        CapDiagKind);
-    for (const auto &M : SharedLocksToAdd)
-      Analyzer->addLock(FSet, FactEntry(Scp, LK_Shared, MLoc, M.sexpr()),
-                        CapDiagKind);
-
-    // handle corner case where the underlying mutex is invalid
-    if (ExclusiveLocksToAdd.size() == 0 && SharedLocksToAdd.size() == 0) {
-      Analyzer->addLock(FSet, FactEntry(Scp, LK_Exclusive, MLoc),
-                        CapDiagKind);
-    }
+    CapExprSet UnderlyingMutexes(ExclusiveLocksToAdd);
+    std::copy(SharedLocksToAdd.begin(), SharedLocksToAdd.end(),
+              std::back_inserter(UnderlyingMutexes));
+    Analyzer->addLock(FSet,
+                      llvm::make_unique<ScopedLockableFactEntry>(
+                          Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
+                      CapDiagKind);
   }
 
   // Remove locks.
@@ -1729,22 +1792,8 @@ void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
         *Iter1 = Fact;
       }
     } else {
-      if (LDat2->underlying()) {
-        if (FSet2.findLock(FactMan, CapabilityExpr(LDat2->underlying(),
-                                                   false))) {
-          // If this is a scoped lock that manages another mutex, and if the
-          // underlying mutex is still held, then warn about the underlying
-          // mutex.
-          Handler.handleMutexHeldEndOfScope("mutex",
-                                            sx::toString(LDat2->underlying()),
-                                            LDat2->loc(), JoinLoc, LEK1);
-        }
-      }
-      else if (!LDat2->managed() && !LDat2->asserted() &&
-               !LDat2->negative() && !LDat2->isUniversal()) {
-        Handler.handleMutexHeldEndOfScope("mutex", LDat2->toString(),
-                                          LDat2->loc(), JoinLoc, LEK1);
-      }
+      LDat2->handleRemovalFromIntersection(FSet2, FactMan, JoinLoc, LEK1,
+                                           Handler);
     }
   }
 
@@ -1754,22 +1803,8 @@ void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
     const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1);
 
     if (!LDat2) {
-      if (LDat1->underlying()) {
-        if (FSet1Orig.findLock(FactMan, CapabilityExpr(LDat1->underlying(),
-                                                       false))) {
-          // If this is a scoped lock that manages another mutex, and if the
-          // underlying mutex is still held, then warn about the underlying
-          // mutex.
-          Handler.handleMutexHeldEndOfScope("mutex",
-                                            sx::toString(LDat1->underlying()),
-                                            LDat1->loc(), JoinLoc, LEK1);
-        }
-      }
-      else if (!LDat1->managed() && !LDat1->asserted() &&
-               !LDat1->negative() && !LDat1->isUniversal()) {
-        Handler.handleMutexHeldEndOfScope("mutex", LDat1->toString(),
-                                          LDat1->loc(), JoinLoc, LEK2);
-      }
+      LDat1->handleRemovalFromIntersection(FSet1Orig, FactMan, JoinLoc, LEK2,
+                                           Handler);
       if (Modify)
         FSet1.removeLock(FactMan, *LDat1);
     }
@@ -1895,10 +1930,12 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
 
     // FIXME -- Loc can be wrong here.
     for (const auto &Mu : ExclusiveLocksToAdd)
-      addLock(InitialLockset, FactEntry(Mu, LK_Exclusive, Loc),
+      addLock(InitialLockset,
+              llvm::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc),
              CapDiagKind, true);
     for (const auto &Mu : SharedLocksToAdd)
-      addLock(InitialLockset, FactEntry(Mu, LK_Shared, Loc),
+      addLock(InitialLockset,
+              llvm::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc),
              CapDiagKind, true);
   }
 
@@ -2068,11 +2105,11 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
     // issue the appropriate warning.
    // FIXME: the location here is not quite right.
    for (const auto &Lock : ExclusiveLocksAcquired)
-      ExpectedExitSet.addLock(FactMan, FactEntry(Lock, LK_Exclusive,
-                                                 D->getLocation()));
+      ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+                                           Lock, LK_Exclusive, D->getLocation()));
     for (const auto &Lock : SharedLocksAcquired)
-      ExpectedExitSet.addLock(FactMan, FactEntry(Lock, LK_Shared,
-                                                 D->getLocation()));
+      ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+                                           Lock, LK_Shared, D->getLocation()));
     for (const auto &Lock : LocksReleased)
       ExpectedExitSet.removeLock(FactMan, Lock);
 
diff --git a/test/SemaCXX/warn-thread-safety-analysis.cpp b/test/SemaCXX/warn-thread-safety-analysis.cpp
index b9f999d079..7f0a2d1ccb 100644
--- a/test/SemaCXX/warn-thread-safety-analysis.cpp
+++ b/test/SemaCXX/warn-thread-safety-analysis.cpp
@@ -63,6 +63,12 @@ class SCOPED_LOCKABLE ReleasableMutexLock {
   void Release() UNLOCK_FUNCTION();
 };
 
+class __attribute__((scoped_lockable)) DoubleMutexLock {
+public:
+  DoubleMutexLock(Mutex *mu1, Mutex *mu2)
+      __attribute__((exclusive_lock_function(mu1, mu2)));
+  ~DoubleMutexLock() __attribute__((unlock_function));
+};
 
 // The universal lock, written "*", allows checking to be selectively turned
 // off for a particular piece of code.
@@ -1662,6 +1668,12 @@ struct TestScopedLockable {
     a = b+1;
     b = a+1;
   }
+
+  void foo5() {
+    DoubleMutexLock mulock(&mu1, &mu2);
+    a = b + 1;
+    b = a + 1;
+  }
 };
 
 } // end namespace test_scoped_lockable
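
As an illustration of what this enables, the following sketch is client
code in the style of the foo5 test above. It assumes the Mutex class and
the SCOPED_LOCKABLE, EXCLUSIVE_LOCK_FUNCTION, UNLOCK_FUNCTION and
GUARDED_BY macros defined at the top of
test/SemaCXX/warn-thread-safety-analysis.cpp; the names DoubleGuard and
update are hypothetical:

    Mutex mu1, mu2;
    int a GUARDED_BY(mu1);
    int b GUARDED_BY(mu2);

    // A guard that acquires two mutexes at once. With this change, both
    // are recorded as underlying mutexes of one ScopedLockableFactEntry.
    class SCOPED_LOCKABLE DoubleGuard {
    public:
      DoubleGuard(Mutex *m1, Mutex *m2) EXCLUSIVE_LOCK_FUNCTION(m1, m2);
      ~DoubleGuard() UNLOCK_FUNCTION();  // releases every underlying mutex
    };

    void update() {
      DoubleGuard guard(&mu1, &mu2);  // acquires mu1 and mu2
      a = b + 1;                      // OK: both mu1 and mu2 are held
    }                                 // destruction releases both, no warnings

Previously a scoped lockable's FactEntry could record only a single
underlying mutex (see the FIXME removed from handleCall above), so a
two-lock guard like this could not be modeled correctly.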