1 //===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // An intra-procedural analysis for thread safety (e.g. deadlocks and race
11 // conditions), based off of an annotation system.
13 // See http://clang.llvm.org/docs/LanguageExtensions.html#threadsafety for more
16 //===----------------------------------------------------------------------===//
18 #include "clang/Analysis/Analyses/ThreadSafety.h"
19 #include "clang/Analysis/AnalysisContext.h"
20 #include "clang/Analysis/CFG.h"
21 #include "clang/Analysis/CFGStmtMap.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/ExprCXX.h"
24 #include "clang/AST/StmtCXX.h"
25 #include "clang/AST/StmtVisitor.h"
26 #include "clang/Basic/SourceManager.h"
27 #include "clang/Basic/SourceLocation.h"
28 #include "llvm/ADT/BitVector.h"
29 #include "llvm/ADT/FoldingSet.h"
30 #include "llvm/ADT/ImmutableMap.h"
31 #include "llvm/ADT/PostOrderIterator.h"
32 #include "llvm/ADT/SmallVector.h"
33 #include "llvm/ADT/StringRef.h"
37 using namespace clang;
38 using namespace thread_safety;
40 // Key method definition
// Defining the destructor out-of-line anchors the handler's vtable/RTTI in
// this translation unit ("key function" idiom) -- presumed virtual; confirm
// in ThreadSafety.h.
41 ThreadSafetyHandler::~ThreadSafetyHandler() {}
// Returns the expression denoting the "parent" object of a member access or
// member call (used to substitute for "this" in lock expressions).
44 static Expr *getParent(Expr *Exp) {
45 if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
// NOTE(review): this listing drops the line following the guard above
// (presumably "return ME->getBase();") -- confirm against the original file.
47 if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp))
48 return CE->getImplicitObjectArgument();
// NOTE(review): the fall-through return (likely "return 0;") and the closing
// brace are also missing from this listing.
53 /// \brief Implements a set of CFGBlocks using a BitVector.
55 /// This class contains a minimal interface, primarily dictated by the SetType
56 /// template parameter of the llvm::po_iterator template, as used with external
57 /// storage. We also use this set to keep track of which CFGBlocks we visit
58 /// during the analysis.
// NOTE(review): the "class CFGBlockSet {" header line and access specifiers
// are dropped by this listing.
// One bit per CFGBlock, indexed by CFGBlock::getBlockID().
60 llvm::BitVector VisitedBlockIDs;
63 // po_iterator requires this iterator, but the only interface needed is the
64 // value_type typedef.
66 typedef const CFGBlock *value_type;
// Sizes the bit vector to the number of blocks; all bits start unset.
70 CFGBlockSet(const CFG *G) : VisitedBlockIDs(G->getNumBlockIDs(), false) {}
72 /// \brief Set the bit associated with a particular CFGBlock.
73 /// This is the important method for the SetType template parameter.
// Returns false for null blocks and already-visited blocks, true on first
// insertion (the po_iterator "was newly inserted" contract).
74 bool insert(const CFGBlock *Block) {
75 // Note that insert() is called by po_iterator, which doesn't check to make
76 // sure that Block is non-null. Moreover, the CFGBlock iterator will
77 // occasionally hand out null pointers for pruned edges, so we catch those
// NOTE(review): the null-check line(s) and the "return false;" after the
// test() guard are missing from this listing, as is the final "return true;".
80 return false; // if an edge is trivially false.
81 if (VisitedBlockIDs.test(Block->getBlockID()))
83 VisitedBlockIDs.set(Block->getBlockID());
87 /// \brief Check if the bit for a CFGBlock has been already set.
88 /// This method is for tracking visited blocks in the main threadsafety loop.
89 /// Block must not be null.
90 bool alreadySet(const CFGBlock *Block) {
91 return VisitedBlockIDs.test(Block->getBlockID());
95 /// \brief We create a helper class which we use to iterate through CFGBlocks in
96 /// the topological order.
97 class TopologicallySortedCFG {
98 typedef llvm::po_iterator<const CFG*, CFGBlockSet, true> po_iterator;
// Blocks stored in post-order; iterating in reverse yields reverse post-order,
// a topological order for the acyclic portion of the CFG.
100 std::vector<const CFGBlock*> Blocks;
103 typedef std::vector<const CFGBlock*>::reverse_iterator iterator;
// Builds the post-order list via po_iterator with external storage (BSet).
105 TopologicallySortedCFG(const CFG *CFGraph) {
106 Blocks.reserve(CFGraph->getNumBlockIDs());
107 CFGBlockSet BSet(CFGraph);
109 for (po_iterator I = po_iterator::begin(CFGraph, BSet),
110 E = po_iterator::end(CFGraph, BSet); I != E; ++I) {
111 Blocks.push_back(*I);
// NOTE(review): the begin()/end()/empty() member signatures and the class's
// closing lines are dropped by this listing; only their bodies remain below.
116 return Blocks.rbegin();
120 return Blocks.rend();
124 return begin() == end();
128 /// \brief A MutexID object uniquely identifies a particular mutex, and
129 /// is built from an Expr* (i.e. calling a lock function).
131 /// Thread-safety analysis works by comparing lock expressions. Within the
132 /// body of a function, an expression such as "x->foo->bar.mu" will resolve to
133 /// a particular mutex object at run-time. Subsequent occurrences of the same
134 /// expression (where "same" means syntactic equality) will refer to the same
135 /// run-time object if three conditions hold:
136 /// (1) Local variables in the expression, such as "x" have not changed.
137 /// (2) Values on the heap that affect the expression have not changed.
138 /// (3) The expression involves only pure function calls.
139 /// The current implementation assumes, but does not verify, that multiple uses
140 /// of the same lock expression satisfy these criteria.
142 /// Clang introduces an additional wrinkle, which is that it is difficult to
143 /// derive canonical expressions, or compare expressions directly for equality.
144 /// Thus, we identify a mutex not by an Expr, but by the set of named
145 /// declarations that are referenced by the Expr. In other words,
146 /// x->foo->bar.mu will be a four element vector with the Decls for
147 /// mu, bar, and foo, and x. The vector will uniquely identify the expression
148 /// for all practical purposes.
150 /// Note we will need to perform substitution on "this" and function parameter
151 /// names when constructing a lock expression.
154 /// class C { Mutex Mu; void lock() EXCLUSIVE_LOCK_FUNCTION(this->Mu); };
155 /// void myFunc(C *X) { ... X->lock() ... }
156 /// The original expression for the mutex acquired by myFunc is "this->Mu", but
157 /// "X" is substituted for "this" so we get X->Mu();
159 /// For another example:
160 /// foo(MyList *L) EXCLUSIVE_LOCKS_REQUIRED(L->Mu) { ... }
162 /// foo(MyL); // requires lock MyL->Mu to be held
// NOTE(review): the "class MutexID {" header and access-specifier lines are
// dropped by this listing.
// Decls referenced by the lock expression, outermost-first (mu, bar, foo, x).
164 SmallVector<NamedDecl*, 2> DeclSeq;
166 /// Build a Decl sequence representing the lock from the given expression.
167 /// Recursive function that bottoms out when the final DeclRefExpr is reached.
168 // FIXME: Lock expressions that involve array indices or function calls.
169 // FIXME: Deal with LockReturned attribute.
170 void buildMutexID(Expr *Exp, Expr *Parent) {
171 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
172 NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
173 DeclSeq.push_back(ND);
174 } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
175 NamedDecl *ND = ME->getMemberDecl();
176 DeclSeq.push_back(ND);
177 buildMutexID(ME->getBase(), Parent);
178 } else if (isa<CXXThisExpr>(Exp)) {
// "this" is replaced by the Parent expression supplied by the caller.
// NOTE(review): the guard before this recursion (likely "if (Parent)") is
// missing from the listing.
180 buildMutexID(Parent, 0);
182 return; // mutexID is still valid in this case
183 } else if (CastExpr *CE = dyn_cast<CastExpr>(Exp))
184 buildMutexID(CE->getSubExpr(), Parent);
// NOTE(review): the "else" introducing this fallback is dropped here.
186 DeclSeq.clear(); // invalid lock expression
// Public constructor: an empty DeclSeq afterwards means the expression could
// not be parsed as a lock (see isValid()).
190 MutexID(Expr *LExpr, Expr *ParentExpr) {
191 buildMutexID(LExpr, ParentExpr);
194 /// If we encounter part of a lock expression we cannot parse
195 bool isValid() const {
196 return !DeclSeq.empty();
199 bool operator==(const MutexID &other) const {
200 return DeclSeq == other.DeclSeq;
203 bool operator!=(const MutexID &other) const {
204 return !(*this == other);
207 // SmallVector overloads Operator< to do lexicographic ordering. Note that
208 // we use pointer equality (and <) to compare NamedDecls. This means the order
209 // of MutexIDs in a lockset is nondeterministic. In order to output
210 // diagnostics in a deterministic ordering, we must order all diagnostics to
211 // output by SourceLocation when iterating through this lockset.
212 bool operator<(const MutexID &other) const {
213 return DeclSeq < other.DeclSeq;
216 /// \brief Returns the name of the first Decl in the list for a given MutexID;
217 /// e.g. the lock expression foo.bar() has name "bar".
218 /// The caret will point unambiguously to the lock expression, so using this
219 /// name in diagnostics is a way to get simple, and consistent, mutex names.
220 /// We do not want to output the entire expression text for security reasons.
221 StringRef getName() const {
// NOTE(review): an assert on isValid() (original line 222) appears to be
// dropped; front() on an empty DeclSeq would be undefined behavior.
223 return DeclSeq.front()->getName();
// Hash all referenced Decls into the FoldingSet ID so ImmutableMap can key on
// this MutexID.
226 void Profile(llvm::FoldingSetNodeID &ID) const {
227 for (SmallVectorImpl<NamedDecl*>::const_iterator I = DeclSeq.begin(),
228 E = DeclSeq.end(); I != E; ++I) {
234 /// \brief This is a helper class that stores info about the most recent
235 /// acquire of a Lock.
237 /// The main body of the analysis maps MutexIDs to LockDatas.
// NOTE(review): the "struct LockData {" (or class) header line is dropped by
// this listing.
// Where the lock was acquired; used for diagnostics and equality.
239 SourceLocation AcquireLoc;
241 /// \brief LKind stores whether a lock is held shared or exclusively.
242 /// Note that this analysis does not currently support either re-entrant
243 /// locking or lock "upgrading" and "downgrading" between exclusive and
246 /// FIXME: add support for re-entrant locking and lock up/downgrading
// NOTE(review): the "LockKind LKind;" member declaration appears to be
// dropped here (it is referenced below and by the constructor).
249 LockData(SourceLocation AcquireLoc, LockKind LKind)
250 : AcquireLoc(AcquireLoc), LKind(LKind) {}
252 bool operator==(const LockData &other) const {
253 return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
256 bool operator!=(const LockData &other) const {
257 return !(*this == other);
// Fold both fields into the FoldingSet ID for use as an ImmutableMap value.
260 void Profile(llvm::FoldingSetNodeID &ID) const {
261 ID.AddInteger(AcquireLoc.getRawEncoding());
262 ID.AddInteger(LKind);
266 /// A Lockset maps each MutexID (defined above) to information about how it has
/// been acquired (a LockData: acquire location plus lock kind).
268 typedef llvm::ImmutableMap<MutexID, LockData> Lockset;
270 /// \brief We use this class to visit different types of expressions in
271 /// CFGBlocks, and build up the lockset.
272 /// An expression may cause us to add or remove locks from the lockset, or else
273 /// output error messages related to missing locks.
274 /// FIXME: In future, we may be able to not inherit from a visitor.
275 class BuildLockset : public StmtVisitor<BuildLockset> {
// Diagnostic sink for all warnings produced while visiting statements.
276 ThreadSafetyHandler &Handler;
// NOTE(review): the "Lockset LSet;" member declaration appears to be dropped
// by this listing (LSet is used throughout the class).
278 Lockset::Factory &LocksetFactory;
281 void removeLock(SourceLocation UnlockLoc, Expr *LockExp, Expr *Parent);
282 void addLock(SourceLocation LockLoc, Expr *LockExp, Expr *Parent,
// NOTE(review): the trailing "LockKind LK);" of the addLock declaration is
// missing here.
284 const ValueDecl *getValueDecl(Expr *Exp);
285 void warnIfMutexNotHeld (const NamedDecl *D, Expr *Exp, AccessKind AK,
286 Expr *MutexExp, ProtectedOperationKind POK);
287 void checkAccess(Expr *Exp, AccessKind AK);
288 void checkDereference(Expr *Exp, AccessKind AK);
290 template <class AttrType>
291 void addLocksToSet(LockKind LK, Attr *Attr, CXXMemberCallExpr *Exp);
293 /// \brief Returns true if the lockset contains a lock, regardless of whether
294 /// the lock is held exclusively or shared.
295 bool locksetContains(MutexID Lock) const {
296 return LSet.lookup(Lock);
299 /// \brief Returns true if the lockset contains a lock with the passed in
301 bool locksetContains(MutexID Lock, LockKind KindRequested) const {
302 const LockData *LockHeld = LSet.lookup(Lock);
303 return (LockHeld && KindRequested == LockHeld->LKind);
306 /// \brief Returns true if the lockset contains a lock with at least the
307 /// passed in locktype. So for example, if we pass in LK_Shared, this function
308 /// returns true if the lock is held LK_Shared or LK_Exclusive. If we pass in
309 /// LK_Exclusive, this function returns true if the lock is held LK_Exclusive.
310 bool locksetContainsAtLeast(MutexID Lock, LockKind KindRequested) const {
311 switch (KindRequested) {
// NOTE(review): the "case LK_Shared:" / "case LK_Exclusive:" labels are
// dropped by this listing; only the case bodies remain.
313 return locksetContains(Lock);
315 return locksetContains(Lock, KindRequested);
317 llvm_unreachable("Unknown LockKind");
321 BuildLockset(ThreadSafetyHandler &Handler, Lockset LS, Lockset::Factory &F)
322 : StmtVisitor<BuildLockset>(), Handler(Handler), LSet(LS),
// Accessor for the lockset accumulated so far.
325 Lockset getLockset() {
329 void VisitUnaryOperator(UnaryOperator *UO);
330 void VisitBinaryOperator(BinaryOperator *BO);
331 void VisitCastExpr(CastExpr *CE);
332 void VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp);
335 /// \brief Add a new lock to the lockset, warning if the lock is already there.
336 /// \param LockLoc The source location of the acquire
337 /// \param LockExp The lock expression corresponding to the lock to be added
338 void BuildLockset::addLock(SourceLocation LockLoc, Expr *LockExp, Expr *Parent,
// NOTE(review): the trailing "LockKind LK) {" of this signature is dropped by
// the listing.
340 // FIXME: deal with acquired before/after annotations. We can write a first
341 // pass that does the transitive lookup lazily, and refine afterwards.
342 MutexID Mutex(LockExp, Parent);
343 if (!Mutex.isValid()) {
// Cannot identify the mutex: report and bail out (the early "return;" after
// this call appears to be dropped from the listing).
344 Handler.handleInvalidLockExp(LockExp->getExprLoc());
348 LockData NewLock(LockLoc, LK);
350 // FIXME: Don't always warn when we have support for reentrant locks.
351 if (locksetContains(Mutex))
352 Handler.handleDoubleLock(Mutex.getName(), LockLoc);
// ImmutableMap: add() returns a new map; rebind LSet to it.
353 LSet = LocksetFactory.add(LSet, Mutex, NewLock);
356 /// \brief Remove a lock from the lockset, warning if the lock is not there.
357 /// \param LockExp The lock expression corresponding to the lock to be removed
358 /// \param UnlockLoc The source location of the unlock (only used in error msg)
359 void BuildLockset::removeLock(SourceLocation UnlockLoc, Expr *LockExp,
// NOTE(review): the trailing "Expr *Parent) {" of this signature is dropped
// by the listing.
361 MutexID Mutex(LockExp, Parent);
362 if (!Mutex.isValid()) {
363 Handler.handleInvalidLockExp(LockExp->getExprLoc());
// remove() on an ImmutableMap is a no-op when the key is absent, so compare
// the result to detect an unlock without a matching lock (the comparison and
// the final "LSet = NewLSet;" lines appear to be dropped by the listing).
367 Lockset NewLSet = LocksetFactory.remove(LSet, Mutex);
369 Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);
374 /// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
// Returns the referenced/member ValueDecl for the two supported expression
// forms; the fallback return for other expression kinds (likely "return 0;")
// is dropped by this listing.
375 const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
376 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp))
377 return DR->getDecl();
379 if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
380 return ME->getMemberDecl();
385 /// \brief Warn if the LSet does not contain a lock sufficient to protect access
386 /// of at least the passed in AccessType.
387 void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
388 AccessKind AK, Expr *MutexExp,
389 ProtectedOperationKind POK) {
// Map the access (read/write) to the minimum lock kind required.
390 LockKind LK = getLockKindFromAccessKind(AK);
// Parent is needed to resolve "this" inside the attribute's mutex expression.
391 Expr *Parent = getParent(Exp);
392 MutexID Mutex(MutexExp, Parent);
393 if (!Mutex.isValid())
394 Handler.handleInvalidLockExp(MutexExp->getExprLoc());
395 else if (!locksetContainsAtLeast(Mutex, LK))
396 Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK, Exp->getExprLoc());
400 /// \brief This method identifies variable dereferences and checks pt_guarded_by
401 /// and pt_guarded_var annotations. Note that we only check these annotations
402 /// at the time a pointer is dereferenced.
403 /// FIXME: We need to check for other types of pointer dereferences
404 /// (e.g. [], ->) and deal with them here.
405 /// \param Exp An expression that has been read or written.
406 void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) {
// Only unary '*' dereferences are handled; everything else returns early
// (the "return;" under this guard is dropped by the listing).
407 UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp);
408 if (!UO || UO->getOpcode() != clang::UO_Deref)
410 Exp = UO->getSubExpr()->IgnoreParenCasts();
412 const ValueDecl *D = getValueDecl(Exp);
413 if(!D || !D->hasAttrs())
// pt_guarded_var with no lock held at all: warn that *some* mutex must be
// held for the dereference.
416 if (D->getAttr<PtGuardedVarAttr>() && LSet.isEmpty())
417 Handler.handleNoMutexHeld(D, POK_VarDereference, AK, Exp->getExprLoc());
// pt_guarded_by(mu): verify the named mutex is held for each such attribute.
419 const AttrVec &ArgAttrs = D->getAttrs();
420 for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
421 if (PtGuardedByAttr *PGBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i]))
422 warnIfMutexNotHeld(D, Exp, AK, PGBAttr->getArg(), POK_VarDereference);
425 /// \brief Checks guarded_by and guarded_var attributes.
426 /// Whenever we identify an access (read or write) of a DeclRefExpr or
427 /// MemberExpr, we need to check whether there are any guarded_by or
428 /// guarded_var attributes, and make sure we hold the appropriate mutexes.
429 void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
430 const ValueDecl *D = getValueDecl(Exp);
431 if(!D || !D->hasAttrs())
// guarded_var with an empty lockset: some mutex must be held for any access.
434 if (D->getAttr<GuardedVarAttr>() && LSet.isEmpty())
435 Handler.handleNoMutexHeld(D, POK_VarAccess, AK, Exp->getExprLoc());
// guarded_by(mu): verify each named mutex is held at sufficient strength.
437 const AttrVec &ArgAttrs = D->getAttrs();
438 for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
439 if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i]))
440 warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
443 /// \brief For unary operations which read and write a variable, we need to
444 /// check whether we hold any required mutexes. Reads are checked in
446 void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
447 switch (UO->getOpcode()) {
448 case clang::UO_PostDec:
449 case clang::UO_PostInc:
450 case clang::UO_PreDec:
451 case clang::UO_PreInc: {
// Inc/dec both read and write; the read half is checked via the cast visitor,
// here we check the write against guarded_var/guarded_by and the pt_ variants.
452 Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts();
453 checkAccess(SubExp, AK_Written);
454 checkDereference(SubExp, AK_Written);
// NOTE(review): the break, default case, and closing braces of this switch
// are dropped by the listing.
462 /// For binary operations which assign to a variable (writes), we need to check
463 /// whether we hold any required mutexes.
464 /// FIXME: Deal with non-primitive types.
465 void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
// Only assignments (including compound assignments) are writes; the early
// "return;" under this guard is dropped by the listing.
466 if (!BO->isAssignmentOp())
468 Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
469 checkAccess(LHSExp, AK_Written);
470 checkDereference(LHSExp, AK_Written);
473 /// Whenever we do an LValue to Rvalue cast, we are reading a variable and
474 /// need to ensure we hold any required mutexes.
475 /// FIXME: Deal with non-primitive types.
476 void BuildLockset::VisitCastExpr(CastExpr *CE) {
// Only lvalue-to-rvalue conversions count as reads; the early "return;"
// under this guard is dropped by the listing.
477 if (CE->getCastKind() != CK_LValueToRValue)
479 Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts();
480 checkAccess(SubExp, AK_Read);
481 checkDereference(SubExp, AK_Read);
484 /// \brief This function, parameterized by an attribute type, is used to add a
485 /// set of locks specified as attribute arguments to the lockset.
486 template <typename AttrType>
487 void BuildLockset::addLocksToSet(LockKind LK, Attr *Attr,
488 CXXMemberCallExpr *Exp) {
489 typedef typename AttrType::args_iterator iterator_type;
490 SourceLocation ExpLocation = Exp->getExprLoc();
// The object the method is called on; substitutes for "this" in the
// attribute's lock expressions.
491 Expr *Parent = Exp->getImplicitObjectArgument();
492 AttrType *SpecificAttr = cast<AttrType>(Attr);
494 if (SpecificAttr->args_size() == 0) {
495 // The mutex held is the "this" object.
// An early "return;" after this call appears to be dropped by the listing.
496 addLock(ExpLocation, Parent, 0, LK);
// Otherwise add one lock per attribute argument expression.
500 for (iterator_type I = SpecificAttr->args_begin(),
501 E = SpecificAttr->args_end(); I != E; ++I)
502 addLock(ExpLocation, *I, Parent, LK);
505 /// \brief When visiting CXXMemberCallExprs we need to examine the attributes on
506 /// the method that is being called and add, remove or check locks in the
507 /// lockset accordingly.
509 /// FIXME: For classes annotated with one of the guarded annotations, we need
510 /// to treat const method calls as reads and non-const method calls as writes,
511 /// and check that the appropriate locks are held. Non-const method calls with
512 /// the same signature as const method calls can be also treated as reads.
514 /// FIXME: We need to also visit CallExprs to catch/check global functions.
516 /// FIXME: Do not flag an error for member variables accessed in constructors/
518 void BuildLockset::VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp) {
519 NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
521 SourceLocation ExpLocation = Exp->getExprLoc();
// Receiver object; used to substitute "this" in attribute lock expressions.
522 Expr *Parent = Exp->getImplicitObjectArgument();
524 if(!D || !D->hasAttrs())
// Dispatch on each thread-safety attribute attached to the callee.
527 AttrVec &ArgAttrs = D->getAttrs();
528 for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
529 Attr *Attr = ArgAttrs[i];
530 switch (Attr->getKind()) {
531 // When we encounter an exclusive lock function, we need to add the lock
532 // to our lockset with kind exclusive.
533 case attr::ExclusiveLockFunction:
// NOTE(review): "break;" statements between cases are dropped throughout
// this listing of the switch.
534 addLocksToSet<ExclusiveLockFunctionAttr>(LK_Exclusive, Attr, Exp);
537 // When we encounter a shared lock function, we need to add the lock
538 // to our lockset with kind shared.
539 case attr::SharedLockFunction:
540 addLocksToSet<SharedLockFunctionAttr>(LK_Shared, Attr, Exp);
543 // When we encounter an unlock function, we need to remove unlocked
544 // mutexes from the lockset, and flag a warning if they are not there.
545 case attr::UnlockFunction: {
546 UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);
548 if (UFAttr->args_size() == 0) { // The lock held is the "this" object.
549 removeLock(ExpLocation, Parent, 0);
// Otherwise remove one lock per attribute argument.
553 for (UnlockFunctionAttr::args_iterator I = UFAttr->args_begin(),
554 E = UFAttr->args_end(); I != E; ++I)
555 removeLock(ExpLocation, *I, Parent);
// Calling a function annotated exclusive_locks_required(...) is treated as a
// write access to the protected data.
559 case attr::ExclusiveLocksRequired: {
560 ExclusiveLocksRequiredAttr *ELRAttr =
561 cast<ExclusiveLocksRequiredAttr>(Attr);
563 for (ExclusiveLocksRequiredAttr::args_iterator
564 I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I)
565 warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
// shared_locks_required(...) is treated as a read access.
569 case attr::SharedLocksRequired: {
570 SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr);
572 for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(),
573 E = SLRAttr->args_end(); I != E; ++I)
574 warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
// locks_excluded(...): warn if any of the named mutexes is currently held.
578 case attr::LocksExcluded: {
579 LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
580 for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
581 E = LEAttr->args_end(); I != E; ++I) {
582 MutexID Mutex(*I, Parent);
583 if (!Mutex.isValid())
584 Handler.handleInvalidLockExp((*I)->getExprLoc());
585 else if (locksetContains(Mutex))
586 Handler.handleFunExcludesLock(D->getName(), Mutex.getName(),
592 // Ignore other (non thread-safety) attributes
599 } // end anonymous namespace
601 /// \brief Compute the intersection of two locksets and issue warnings for any
602 /// locks in the symmetric difference.
604 /// This function is used at a merge point in the CFG when comparing the lockset
605 /// of each branch being merged. For example, given the following sequence:
606 /// A; if () then B; else C; D; we need to check that the lockset after B and C
607 /// are the same. In the event of a difference, we use the intersection of these
608 /// two locksets at the start of D.
609 static Lockset intersectAndWarn(ThreadSafetyHandler &Handler,
610 const Lockset LSet1, const Lockset LSet2,
611 Lockset::Factory &Fact, LockErrorKind LEK) {
// Start from LSet1 and prune/adjust entries while scanning LSet2.
612 Lockset Intersection = LSet1;
613 for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
614 const MutexID &LSet2Mutex = I.getKey();
615 const LockData &LSet2LockData = I.getData();
616 if (const LockData *LD = LSet1.lookup(LSet2Mutex)) {
// Held in both sets but with different kinds: exclusive vs. shared conflict.
617 if (LD->LKind != LSet2LockData.LKind) {
618 Handler.handleExclusiveAndShared(LSet2Mutex.getName(),
619 LSet2LockData.AcquireLoc,
// Favor the LSet2 entry when LSet1's copy is not exclusive.
621 if (LD->LKind != LK_Exclusive)
622 Intersection = Fact.add(Intersection, LSet2Mutex, LSet2LockData);
// Held only in LSet2: report it as not held on every path.
625 Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(),
626 LSet2LockData.AcquireLoc, LEK);
// Second pass: locks held only in LSet1 -- warn and drop them from the
// intersection.
630 for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) {
631 if (!LSet2.contains(I.getKey())) {
632 const MutexID &Mutex = I.getKey();
633 const LockData &MissingLock = I.getData();
634 Handler.handleMutexHeldEndOfScope(Mutex.getName(),
635 MissingLock.AcquireLoc, LEK);
636 Intersection = Fact.remove(Intersection, Mutex);
// File-scope helper used when seeding a function's initial lockset from its
// *_locks_required attributes; unlike BuildLockset::addLock it has no parent
// expression (the null second argument to MutexID below).
642 static Lockset addLock(ThreadSafetyHandler &Handler,
643 Lockset::Factory &LocksetFactory,
644 Lockset &LSet, Expr *LockExp, LockKind LK,
645 SourceLocation Loc) {
646 MutexID Mutex(LockExp, 0);
647 if (!Mutex.isValid()) {
// Unparseable lock expression: report and leave the lockset unchanged (the
// "return LSet;" under this branch appears to be dropped by the listing).
648 Handler.handleInvalidLockExp(LockExp->getExprLoc());
651 LockData NewLock(Loc, LK);
652 return LocksetFactory.add(LSet, Mutex, NewLock);
656 namespace thread_safety {
657 /// \brief Check a function's CFG for thread-safety violations.
659 /// We traverse the blocks in the CFG, compute the set of mutexes that are held
660 /// at the end of each block, and issue warnings for thread safety violations.
661 /// Each block in the CFG is traversed exactly once.
662 void runThreadSafetyAnalysis(AnalysisContext &AC,
663 ThreadSafetyHandler &Handler) {
664 CFG *CFGraph = AC.getCFG();
665 if (!CFGraph) return;
666 const Decl *D = AC.getDecl();
// Respect the opt-out attribute: skip analysis entirely.
667 if (D && D->getAttr<NoThreadSafetyAnalysisAttr>()) return;
669 Lockset::Factory LocksetFactory;
671 // FIXME: Switch to SmallVector? Otherwise improve performance impact?
// Per-block locksets at block entry and exit, indexed by block ID.
672 std::vector<Lockset> EntryLocksets(CFGraph->getNumBlockIDs(),
673 LocksetFactory.getEmptyMap());
674 std::vector<Lockset> ExitLocksets(CFGraph->getNumBlockIDs(),
675 LocksetFactory.getEmptyMap());
677 // We need to explore the CFG via a "topological" ordering.
678 // That way, we will be guaranteed to have information about required
679 // predecessor locksets when exploring a new block.
680 TopologicallySortedCFG SortedGraph(CFGraph);
681 CFGBlockSet VisitedBlocks(CFGraph);
// Seed the entry block's lockset with the function's *_locks_required
// attributes: the caller guarantees these locks are held on entry.
683 if (!SortedGraph.empty() && D->hasAttrs()) {
684 const CFGBlock *FirstBlock = *SortedGraph.begin();
685 Lockset &InitialLockset = EntryLocksets[FirstBlock->getBlockID()];
686 const AttrVec &ArgAttrs = D->getAttrs();
687 for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
688 Attr *Attr = ArgAttrs[i];
689 SourceLocation AttrLoc = Attr->getLocation();
690 if (SharedLocksRequiredAttr *SLRAttr
691 = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
692 for (SharedLocksRequiredAttr::args_iterator
693 SLRIter = SLRAttr->args_begin(),
694 SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
// NOTE(review): the continuation of this call (likely "*SLRIter, LK_Shared,
// AttrLoc);") is dropped by the listing.
695 InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
698 } else if (ExclusiveLocksRequiredAttr *ELRAttr
699 = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
700 for (ExclusiveLocksRequiredAttr::args_iterator
701 ELRIter = ELRAttr->args_begin(),
702 ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
703 InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
704 *ELRIter, LK_Exclusive,
// Main fixed-order dataflow pass: visit each block once, in topological order.
710 for (TopologicallySortedCFG::iterator I = SortedGraph.begin(),
711 E = SortedGraph.end(); I!= E; ++I) {
712 const CFGBlock *CurrBlock = *I;
713 int CurrBlockID = CurrBlock->getBlockID();
715 VisitedBlocks.insert(CurrBlock);
717 // Use the default initial lockset in case there are no predecessors.
718 Lockset &Entryset = EntryLocksets[CurrBlockID];
719 Lockset &Exitset = ExitLocksets[CurrBlockID];
721 // Iterate through the predecessor blocks and warn if the lockset for all
722 // predecessors is not the same. We take the entry lockset of the current
723 // block to be the intersection of all previous locksets.
724 // FIXME: By keeping the intersection, we may output more errors in future
725 // for a lock which is not in the intersection, but was in the union. We
726 // may want to also keep the union in future. As an example, let's say
727 // the intersection contains Mutex L, and the union contains L and M.
728 // Later we unlock M. At this point, we would output an error because we
729 // never locked M; although the real error is probably that we forgot to
730 // lock M on all code paths. Conversely, let's say that later we lock M.
731 // In this case, we should compare against the intersection instead of the
732 // union because the real error is probably that we forgot to unlock M on
734 bool LocksetInitialized = false;
735 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
736 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
738 // if *PI -> CurrBlock is a back edge
// Skip null/pruned predecessors and back edges (unvisited means the edge
// comes from later in the order, i.e. a loop back edge); the "continue;"
// under this guard is dropped by the listing.
739 if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
742 int PrevBlockID = (*PI)->getBlockID();
743 if (!LocksetInitialized) {
744 Entryset = ExitLocksets[PrevBlockID];
745 LocksetInitialized = true;
// NOTE(review): the "} else {" between these branches is dropped by the
// listing.
747 Entryset = intersectAndWarn(Handler, Entryset,
748 ExitLocksets[PrevBlockID], LocksetFactory,
749 LEK_LockedSomePredecessors);
// Walk the block's statements, updating the lockset as locks are acquired,
// released, and checked.
753 BuildLockset LocksetBuilder(Handler, Entryset, LocksetFactory);
754 for (CFGBlock::const_iterator BI = CurrBlock->begin(),
755 BE = CurrBlock->end(); BI != BE; ++BI) {
756 if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&*BI))
757 LocksetBuilder.Visit(const_cast<Stmt*>(CfgStmt->getStmt()));
759 Exitset = LocksetBuilder.getLockset();
761 // For every back edge from CurrBlock (the end of the loop) to another block
762 // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
763 // the one held at the beginning of FirstLoopBlock. We can look up the
764 // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
765 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
766 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
768 // if CurrBlock -> *SI is *not* a back edge
769 if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
772 CFGBlock *FirstLoopBlock = *SI;
773 Lockset PreLoop = EntryLocksets[FirstLoopBlock->getBlockID()];
774 Lockset LoopEnd = ExitLocksets[CurrBlockID];
775 intersectAndWarn(Handler, LoopEnd, PreLoop, LocksetFactory,
776 LEK_LockedSomeLoopIterations);
// Finally, any lock still held at function exit that was not held on entry
// is flagged as held at end of function.
780 Lockset InitialLockset = EntryLocksets[CFGraph->getEntry().getBlockID()];
781 Lockset FinalLockset = ExitLocksets[CFGraph->getExit().getBlockID()];
783 // FIXME: Should we call this function for all blocks which exit the function?
784 intersectAndWarn(Handler, InitialLockset, FinalLockset, LocksetFactory,
785 LEK_LockedAtEndOfFunction);
788 /// \brief Helper function that returns a LockKind required for the given level
/// of access (AccessKind): reads need at least a shared lock, writes an
/// exclusive lock -- presumed; the switch cases (original lines 791-796) are
/// dropped by this listing, so confirm against the original file.
790 LockKind getLockKindFromAccessKind(AccessKind AK) {
797 llvm_unreachable("Unknown AccessKind");
799 }} // end namespace clang::thread_safety