Thread safety analysis: Allow scoped releasing of capabilities
author    Aaron Puchert <aaronpuchert@alice-dsl.net>
          Sun, 16 Dec 2018 14:15:30 +0000 (14:15 +0000)
committer Aaron Puchert <aaronpuchert@alice-dsl.net>
          Sun, 16 Dec 2018 14:15:30 +0000 (14:15 +0000)
Summary:
The pattern is problematic with C++ exceptions, and it is not as
widespread as scoped locks, but it is still used by some projects, for
example Chromium.
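
For context, the pattern in question looks roughly like this (a minimal
sketch using the annotation macros from the analysis' test suite; the
class name is illustrative and the declarations mirror the MutexUnlock
helper added to the test below):

  class SCOPED_LOCKABLE MutexUnlocker {
  public:
    MutexUnlocker(Mutex *mu) EXCLUSIVE_UNLOCK_FUNCTION(mu); // releases mu
    ~MutexUnlocker() EXCLUSIVE_UNLOCK_FUNCTION();           // re-acquires mu
  };

  Mutex mu;
  int x GUARDED_BY(mu);

  void f() EXCLUSIVE_LOCKS_REQUIRED(mu) {
    MutexUnlocker scope(&mu);
    x = 1; // now diagnosed: writing 'x' requires holding 'mu', which 'scope' released
  }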

We are a bit stricter here at join points: patterns that are allowed for
scoped locks aren't allowed here. That could still be changed in the
future, but I'd argue we should only relax this if people ask for it.
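
Concretely, re-acquiring the mutex through the scoped object on only one
branch is diagnosed at the join point (this mirrors the join() test added
below and assumes the Mutex mock, mu, and c declared there):

  void joinPoint() EXCLUSIVE_LOCKS_REQUIRED(mu) {
    MutexUnlock scope(&mu); // releases mu
    if (c)
      scope.Lock();         // re-acquires mu on this branch only
    // diagnosed: mutex 'mu' is not held on every path through here
    scope.Lock();
  }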

Fixes PR36162.

Reviewers: aaron.ballman, delesley, pwnall

Reviewed By: delesley, pwnall

Subscribers: pwnall, cfe-commits

Differential Revision: https://reviews.llvm.org/D52578

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@349300 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Analysis/ThreadSafety.cpp
test/SemaCXX/warn-thread-safety-analysis.cpp

lib/Analysis/ThreadSafety.cpp
index f83b0e0cab270c6ec900281c2daec516f7332c99..ca146a83c6c2e7cea4051f1e26bd199afe15d7b7 100644
@@ -42,6 +42,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/ImmutableMap.h"
 #include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerIntPair.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringRef.h"
@@ -890,28 +891,46 @@ public:
 
 class ScopedLockableFactEntry : public FactEntry {
 private:
-  SmallVector<const til::SExpr *, 4> UnderlyingMutexes;
+  enum UnderlyingCapabilityKind {
+    UCK_Acquired,          ///< Any kind of acquired capability.
+    UCK_ReleasedShared,    ///< Shared capability that was released.
+    UCK_ReleasedExclusive, ///< Exclusive capability that was released.
+  };
+
+  using UnderlyingCapability =
+      llvm::PointerIntPair<const til::SExpr *, 2, UnderlyingCapabilityKind>;
+
+  SmallVector<UnderlyingCapability, 4> UnderlyingMutexes;
 
 public:
   ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
-                          const CapExprSet &Excl, const CapExprSet &Shrd)
+                          const CapExprSet &Excl, const CapExprSet &Shrd,
+                          const CapExprSet &ExclRel, const CapExprSet &ShrdRel)
       : FactEntry(CE, LK_Exclusive, Loc, false) {
     for (const auto &M : Excl)
-      UnderlyingMutexes.push_back(M.sexpr());
+      UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired);
     for (const auto &M : Shrd)
-      UnderlyingMutexes.push_back(M.sexpr());
+      UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired);
+    for (const auto &M : ExclRel)
+      UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedExclusive);
+    for (const auto &M : ShrdRel)
+      UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedShared);
   }
 
   void
   handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                 SourceLocation JoinLoc, LockErrorKind LEK,
                                 ThreadSafetyHandler &Handler) const override {
-    for (const auto *UnderlyingMutex : UnderlyingMutexes) {
-      if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
+    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
+      const auto *Entry = FSet.findLock(
+          FactMan, CapabilityExpr(UnderlyingMutex.getPointer(), false));
+      if ((UnderlyingMutex.getInt() == UCK_Acquired && Entry) ||
+          (UnderlyingMutex.getInt() != UCK_Acquired && !Entry)) {
         // If this scoped lock manages another mutex, and if the underlying
-        // mutex is still held, then warn about the underlying mutex.
+        // mutex is still/not held, then warn about the underlying mutex.
         Handler.handleMutexHeldEndOfScope(
-            "mutex", sx::toString(UnderlyingMutex), loc(), JoinLoc, LEK);
+            "mutex", sx::toString(UnderlyingMutex.getPointer()), loc(), JoinLoc,
+            LEK);
       }
     }
   }
@@ -919,17 +938,14 @@ public:
   void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                   ThreadSafetyHandler &Handler,
                   StringRef DiagKind) const override {
-    for (const auto *UnderlyingMutex : UnderlyingMutexes) {
-      CapabilityExpr UnderCp(UnderlyingMutex, false);
+    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
+      CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false);
 
-      // We're relocking the underlying mutexes. Warn on double locking.
-      if (FSet.findLock(FactMan, UnderCp)) {
-        Handler.handleDoubleLock(DiagKind, UnderCp.toString(), entry.loc());
-      } else {
-        FSet.removeLock(FactMan, !UnderCp);
-        FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
-                                  UnderCp, entry.kind(), entry.loc()));
-      }
+      if (UnderlyingMutex.getInt() == UCK_Acquired)
+        lock(FSet, FactMan, UnderCp, entry.kind(), entry.loc(), &Handler,
+             DiagKind);
+      else
+        unlock(FSet, FactMan, UnderCp, entry.loc(), &Handler, DiagKind);
     }
   }
 
@@ -938,32 +954,49 @@ public:
                     bool FullyRemove, ThreadSafetyHandler &Handler,
                     StringRef DiagKind) const override {
     assert(!Cp.negative() && "Managing object cannot be negative.");
-    for (const auto *UnderlyingMutex : UnderlyingMutexes) {
-      CapabilityExpr UnderCp(UnderlyingMutex, false);
-      auto UnderEntry = llvm::make_unique<LockableFactEntry>(
-          !UnderCp, LK_Exclusive, UnlockLoc);
-
-      if (FullyRemove) {
-        // We're destroying the managing object.
-        // Remove the underlying mutex if it exists; but don't warn.
-        if (FSet.findLock(FactMan, UnderCp)) {
-          FSet.removeLock(FactMan, UnderCp);
-          FSet.addLock(FactMan, std::move(UnderEntry));
-        }
+    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
+      CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false);
+
+      // Remove/lock the underlying mutex if it exists/is still unlocked; warn
+      // on double unlocking/locking if we're not destroying the scoped object.
+      ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
+      if (UnderlyingMutex.getInt() == UCK_Acquired) {
+        unlock(FSet, FactMan, UnderCp, UnlockLoc, TSHandler, DiagKind);
       } else {
-        // We're releasing the underlying mutex, but not destroying the
-        // managing object.  Warn on dual release.
-        if (!FSet.findLock(FactMan, UnderCp)) {
-          Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(),
-                                        UnlockLoc);
-        }
-        FSet.removeLock(FactMan, UnderCp);
-        FSet.addLock(FactMan, std::move(UnderEntry));
+        LockKind kind = UnderlyingMutex.getInt() == UCK_ReleasedShared
+                            ? LK_Shared
+                            : LK_Exclusive;
+        lock(FSet, FactMan, UnderCp, kind, UnlockLoc, TSHandler, DiagKind);
       }
     }
     if (FullyRemove)
       FSet.removeLock(FactMan, Cp);
   }
+
+private:
+  void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
+            LockKind kind, SourceLocation loc, ThreadSafetyHandler *Handler,
+            StringRef DiagKind) const {
+    if (!FSet.findLock(FactMan, Cp)) {
+      FSet.removeLock(FactMan, !Cp);
+      FSet.addLock(FactMan,
+                   llvm::make_unique<LockableFactEntry>(Cp, kind, loc));
+    } else if (Handler) {
+      Handler->handleDoubleLock(DiagKind, Cp.toString(), loc);
+    }
+  }
+
+  void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
+              SourceLocation loc, ThreadSafetyHandler *Handler,
+              StringRef DiagKind) const {
+    if (FSet.findLock(FactMan, Cp)) {
+      FSet.removeLock(FactMan, Cp);
+      FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+                                !Cp, LK_Exclusive, loc));
+    } else if (Handler) {
+      Handler->handleUnmatchedUnlock(DiagKind, Cp.toString(), loc);
+    }
+  }
 };
 
 /// Class which implements the core thread safety analysis routines.
@@ -1911,7 +1944,8 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
               std::back_inserter(SharedLocksToAdd));
     Analyzer->addLock(FSet,
                       llvm::make_unique<ScopedLockableFactEntry>(
-                          Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
+                          Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd,
+                          ExclusiveLocksToRemove, SharedLocksToRemove),
                       CapDiagKind);
   }
 }
test/SemaCXX/warn-thread-safety-analysis.cpp
index 54e3369f4d32c04bc637526e83717d8b58591640..f959beb6221e26e70df410d7da55f1f546681b9e 100644
@@ -2787,6 +2787,110 @@ void relockWeird() {
 } // end namespace RelockableScopedLock
 
 
+namespace ScopedUnlock {
+
+class SCOPED_LOCKABLE MutexUnlock {
+public:
+  MutexUnlock(Mutex *mu) EXCLUSIVE_UNLOCK_FUNCTION(mu);
+  ~MutexUnlock() EXCLUSIVE_UNLOCK_FUNCTION();
+
+  void Lock() EXCLUSIVE_UNLOCK_FUNCTION();
+  void Unlock() EXCLUSIVE_LOCK_FUNCTION();
+};
+
+class SCOPED_LOCKABLE ReaderMutexUnlock {
+public:
+  ReaderMutexUnlock(Mutex *mu) SHARED_UNLOCK_FUNCTION(mu);
+  ~ReaderMutexUnlock() EXCLUSIVE_UNLOCK_FUNCTION();
+
+  void Lock() EXCLUSIVE_UNLOCK_FUNCTION();
+  void Unlock() EXCLUSIVE_LOCK_FUNCTION();
+};
+
+Mutex mu;
+int x GUARDED_BY(mu);
+bool c;
+void print(int);
+
+void simple() EXCLUSIVE_LOCKS_REQUIRED(mu) {
+  x = 1;
+  MutexUnlock scope(&mu);
+  x = 2; // expected-warning {{writing variable 'x' requires holding mutex 'mu' exclusively}}
+}
+
+void simpleShared() SHARED_LOCKS_REQUIRED(mu) {
+  print(x);
+  ReaderMutexUnlock scope(&mu);
+  print(x); // expected-warning {{reading variable 'x' requires holding mutex 'mu'}}
+}
+
+void innerUnlock() {
+  MutexLock outer(&mu);
+  if (x == 0) {
+    MutexUnlock inner(&mu);
+    x = 1; // expected-warning {{writing variable 'x' requires holding mutex 'mu' exclusively}}
+  }
+  x = 2;
+}
+
+void innerUnlockShared() {
+  ReaderMutexLock outer(&mu);
+  if (x == 0) {
+    ReaderMutexUnlock inner(&mu);
+    print(x); // expected-warning {{reading variable 'x' requires holding mutex 'mu'}}
+  }
+  print(x);
+}
+
+void manual() EXCLUSIVE_LOCKS_REQUIRED(mu) {
+  MutexUnlock scope(&mu);
+  scope.Lock();
+  x = 2;
+  scope.Unlock();
+  x = 3; // expected-warning {{writing variable 'x' requires holding mutex 'mu' exclusively}}
+}
+
+void join() EXCLUSIVE_LOCKS_REQUIRED(mu) {
+  MutexUnlock scope(&mu);
+  if (c) {
+    scope.Lock(); // expected-note{{mutex acquired here}}
+  }
+  // expected-warning@+1{{mutex 'mu' is not held on every path through here}}
+  scope.Lock();
+}
+
+void doubleLock() EXCLUSIVE_LOCKS_REQUIRED(mu) {
+  MutexUnlock scope(&mu);
+  scope.Lock();
+  scope.Lock(); // expected-warning {{acquiring mutex 'mu' that is already held}}
+}
+
+void doubleUnlock() EXCLUSIVE_LOCKS_REQUIRED(mu) {
+  MutexUnlock scope(&mu);
+  scope.Unlock(); // expected-warning {{releasing mutex 'mu' that was not held}}
+}
+
+class SCOPED_LOCKABLE MutexLockUnlock {
+public:
+  MutexLockUnlock(Mutex *mu1, Mutex *mu2) EXCLUSIVE_UNLOCK_FUNCTION(mu1) EXCLUSIVE_LOCK_FUNCTION(mu2);
+  ~MutexLockUnlock() EXCLUSIVE_UNLOCK_FUNCTION();
+
+  void Release() EXCLUSIVE_UNLOCK_FUNCTION();
+  void Acquire() EXCLUSIVE_LOCK_FUNCTION();
+};
+
+Mutex other;
+void fn() EXCLUSIVE_LOCKS_REQUIRED(other);
+
+void lockUnlock() EXCLUSIVE_LOCKS_REQUIRED(mu) {
+  MutexLockUnlock scope(&mu, &other);
+  fn();
+  x = 1; // expected-warning {{writing variable 'x' requires holding mutex 'mu' exclusively}}
+}
+
+} // end namespace ScopedUnlock
+
+
 namespace TrylockFunctionTest {
 
 class Foo {