1 //===- ThreadSafety.cpp ---------------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // An intra-procedural analysis for thread safety (e.g. deadlocks and race
11 // conditions), based on an annotation system.
13 // See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
14 // for more information.
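//
// Illustrative example (not part of the original header) of the kind of code
// this analysis checks, using the annotation macros described in the
// documentation above:
//
//   Mutex Mu;
//   int Data GUARDED_BY(Mu);
//
//   void Bad()  { Data = 42; }                         // warning: requires Mu
//   void Good() { Mu.Lock(); Data = 42; Mu.Unlock(); } // ok
//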
16 //===----------------------------------------------------------------------===//
18 #include "clang/Analysis/Analyses/ThreadSafety.h"
19 #include "clang/AST/Attr.h"
20 #include "clang/AST/Decl.h"
21 #include "clang/AST/DeclCXX.h"
22 #include "clang/AST/DeclGroup.h"
23 #include "clang/AST/Expr.h"
24 #include "clang/AST/ExprCXX.h"
25 #include "clang/AST/OperationKinds.h"
26 #include "clang/AST/Stmt.h"
27 #include "clang/AST/StmtVisitor.h"
28 #include "clang/AST/Type.h"
29 #include "clang/Analysis/Analyses/PostOrderCFGView.h"
30 #include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
31 #include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
32 #include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
33 #include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
34 #include "clang/Analysis/AnalysisDeclContext.h"
35 #include "clang/Analysis/CFG.h"
36 #include "clang/Basic/LLVM.h"
37 #include "clang/Basic/OperatorKinds.h"
38 #include "clang/Basic/SourceLocation.h"
39 #include "clang/Basic/Specifiers.h"
40 #include "llvm/ADT/ArrayRef.h"
41 #include "llvm/ADT/DenseMap.h"
42 #include "llvm/ADT/ImmutableMap.h"
43 #include "llvm/ADT/Optional.h"
44 #include "llvm/ADT/STLExtras.h"
45 #include "llvm/ADT/SmallVector.h"
46 #include "llvm/ADT/StringRef.h"
47 #include "llvm/Support/Allocator.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/ErrorHandling.h"
50 #include "llvm/Support/raw_ostream.h"
57 #include <type_traits>
61 using namespace clang;
62 using namespace threadSafety;
64 // Key method definition
65 ThreadSafetyHandler::~ThreadSafetyHandler() = default;
70 public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};
74 /// Issue a warning about an invalid lock expression
75 static void warnInvalidLock(ThreadSafetyHandler &Handler,
76 const Expr *MutexExp, const NamedDecl *D,
77 const Expr *DeclExp, StringRef Kind) {
80 Loc = DeclExp->getExprLoc();
82 // FIXME: add a note about the attribute location in MutexExp or D
84 Handler.handleInvalidLockExp(Kind, Loc);
89 /// A set of CapabilityExpr objects, which are compiled from the
90 /// requires attributes on a function.
91 class CapExprSet : public SmallVector<CapabilityExpr, 4> {
93 /// Push M onto list, but discard duplicates.
94 void push_back_nodup(const CapabilityExpr &CapE) {
95 iterator It = std::find_if(begin(), end(),
96 [=](const CapabilityExpr &CapE2) {
97 return CapE.equals(CapE2);
107 /// This is a helper class that stores a fact that is known at a
108 /// particular point in program execution. Currently, a fact is a capability,
109 /// along with additional information, such as where it was acquired, whether
110 /// it is exclusive or shared, etc.
112 /// FIXME: this analysis does not currently support re-entrant locking.
113 class FactEntry : public CapabilityExpr {
115 /// Exclusive or shared.
118 /// Where it was acquired.
119 SourceLocation AcquireLoc;
121 /// True if the lock was asserted.
124 /// True if the lock was declared.
128 FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
129 bool Asrt, bool Declrd = false)
130 : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt),
132 virtual ~FactEntry() = default;
134 LockKind kind() const { return LKind; }
135 SourceLocation loc() const { return AcquireLoc; }
136 bool asserted() const { return Asserted; }
137 bool declared() const { return Declared; }
139 void setDeclared(bool D) { Declared = D; }
142 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
143 SourceLocation JoinLoc, LockErrorKind LEK,
144 ThreadSafetyHandler &Handler) const = 0;
145 virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
146 const CapabilityExpr &Cp, SourceLocation UnlockLoc,
147 bool FullyRemove, ThreadSafetyHandler &Handler,
148 StringRef DiagKind) const = 0;
150 // Return true if LKind >= LK, where exclusive > shared
151 bool isAtLeast(LockKind LK) {
152 return (LKind == LK_Exclusive) || (LK == LK_Shared);
156 using FactID = unsigned short;
158 /// FactManager manages the memory for all facts that are created during
159 /// the analysis of a single routine.
162 std::vector<std::unique_ptr<FactEntry>> Facts;
165 FactID newFact(std::unique_ptr<FactEntry> Entry) {
166 Facts.push_back(std::move(Entry));
167 return static_cast<unsigned short>(Facts.size() - 1);
170 const FactEntry &operator[](FactID F) const { return *Facts[F]; }
171 FactEntry &operator[](FactID F) { return *Facts[F]; }
174 /// A FactSet is the set of facts that are known to be true at a
175 /// particular program point. FactSets must be small, because they are
176 /// frequently copied, and are thus implemented as a set of indices into a
177 /// table maintained by a FactManager. A typical FactSet only holds 1 or 2
178 /// locks, so we can get away with doing a linear search for lookup. Note
179 /// that a hashtable or map is inappropriate in this case, because lookups
180 /// may involve partial pattern matches, rather than exact matches.
183 using FactVec = SmallVector<FactID, 4>;
188 using iterator = FactVec::iterator;
189 using const_iterator = FactVec::const_iterator;
191 iterator begin() { return FactIDs.begin(); }
192 const_iterator begin() const { return FactIDs.begin(); }
194 iterator end() { return FactIDs.end(); }
195 const_iterator end() const { return FactIDs.end(); }
197 bool isEmpty() const { return FactIDs.size() == 0; }
199 // Return true if the set contains only negative facts
200 bool isEmpty(FactManager &FactMan) const {
201 for (const auto FID : *this) {
202 if (!FactMan[FID].negative())
208 void addLockByID(FactID ID) { FactIDs.push_back(ID); }
210 FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
211 FactID F = FM.newFact(std::move(Entry));
212 FactIDs.push_back(F);
216 bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
217 unsigned n = FactIDs.size();
221 for (unsigned i = 0; i < n-1; ++i) {
222 if (FM[FactIDs[i]].matches(CapE)) {
223 FactIDs[i] = FactIDs[n-1];
228 if (FM[FactIDs[n-1]].matches(CapE)) {
235 iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
236 return std::find_if(begin(), end(), [&](FactID ID) {
237 return FM[ID].matches(CapE);
241 FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
242 auto I = std::find_if(begin(), end(), [&](FactID ID) {
243 return FM[ID].matches(CapE);
245 return I != end() ? &FM[*I] : nullptr;
248 FactEntry *findLockUniv(FactManager &FM, const CapabilityExpr &CapE) const {
249 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
250 return FM[ID].matchesUniv(CapE);
252 return I != end() ? &FM[*I] : nullptr;
255 FactEntry *findPartialMatch(FactManager &FM,
256 const CapabilityExpr &CapE) const {
257 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
258 return FM[ID].partiallyMatches(CapE);
260 return I != end() ? &FM[*I] : nullptr;
263 bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const {
264 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
265 return FM[ID].valueDecl() == Vd;
271 class ThreadSafetyAnalyzer;
276 namespace threadSafety {
280 using BeforeVect = SmallVector<const ValueDecl *, 4>;
286 BeforeInfo() = default;
287 BeforeInfo(BeforeInfo &&) = default;
291 llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
292 using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;
295 BeforeSet() = default;
297 BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
298 ThreadSafetyAnalyzer& Analyzer);
300 BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
301 ThreadSafetyAnalyzer &Analyzer);
303 void checkBeforeAfter(const ValueDecl* Vd,
305 ThreadSafetyAnalyzer& Analyzer,
306 SourceLocation Loc, StringRef CapKind);
313 } // namespace threadSafety
318 class LocalVariableMap;
320 using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;
322 /// A side (entry or exit) of a CFG node.
323 enum CFGBlockSide { CBS_Entry, CBS_Exit };
325 /// CFGBlockInfo is a struct which contains all the information that is
326 /// maintained for each block in the CFG. See LocalVariableMap for more
327 /// information about the contexts.
328 struct CFGBlockInfo {
329 // Lockset held at entry to block
332 // Lockset held at exit from block
335 // Context held at entry to block
336 LocalVarContext EntryContext;
338 // Context held at exit from block
339 LocalVarContext ExitContext;
341 // Location of first statement in block
342 SourceLocation EntryLoc;
344 // Location of last statement in block.
345 SourceLocation ExitLoc;
347 // Used to replay contexts later
350 // Is this block reachable?
351 bool Reachable = false;
353 const FactSet &getSet(CFGBlockSide Side) const {
354 return Side == CBS_Entry ? EntrySet : ExitSet;
357 SourceLocation getLocation(CFGBlockSide Side) const {
358 return Side == CBS_Entry ? EntryLoc : ExitLoc;
362 CFGBlockInfo(LocalVarContext EmptyCtx)
363 : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}
366 static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
369 // A LocalVariableMap maintains a map from local variables to their currently
370 // valid definitions. It provides SSA-like functionality when traversing the
371 // CFG. Like SSA, each definition or assignment to a variable is assigned a
372 // unique name (an integer), which acts as the SSA name for that definition.
373 // The total set of names is shared among all CFG basic blocks.
374 // Unlike SSA, we do not rewrite expressions to replace local variable DeclRefs
375 // with their SSA names. Instead, we compute a Context for each point in the
376 // code, which maps local variables to the appropriate SSA name. This map
377 // changes with each assignment.
379 // The map is computed in a single pass over the CFG. Subsequent analyses can
380 // then query the map to find the appropriate Context for a statement, and use
381 // that Context to look up the definitions of variables.
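//
// A hypothetical sketch (not from the original source) of how a client might
// consult the map once it has been built; getNextContext and lookupExpr are
// the methods defined below:
//
//   LocalVariableMap::Context Ctx = BlockInfo[ID].EntryContext;
//   unsigned CtxIndex = BlockInfo[ID].EntryIndex;
//   Ctx = LocalVarMap.getNextContext(CtxIndex, S, Ctx); // after each assignment
//   if (const Expr *E = LocalVarMap.lookupExpr(VD, Ctx))
//     ;  // E is the statically known value of VD at this point, if any
//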
382 class LocalVariableMap {
384 using Context = LocalVarContext;
386 /// A VarDefinition consists of an expression, representing the value of the
387 /// variable, along with the context in which that expression should be
388 /// interpreted. A reference VarDefinition does not itself contain this
389 /// information, but instead contains a pointer to a previous VarDefinition.
390 struct VarDefinition {
392 friend class LocalVariableMap;
394 // The original declaration for this variable.
395 const NamedDecl *Dec;
397 // The expression for this variable, OR
398 const Expr *Exp = nullptr;
400 // Reference to another VarDefinition
403 // The map with which Exp should be interpreted.
406 bool isReference() { return !Exp; }
409 // Create ordinary variable definition
410 VarDefinition(const NamedDecl *D, const Expr *E, Context C)
411 : Dec(D), Exp(E), Ctx(C) {}
413 // Create reference to previous definition
414 VarDefinition(const NamedDecl *D, unsigned R, Context C)
415 : Dec(D), Ref(R), Ctx(C) {}
419 Context::Factory ContextFactory;
420 std::vector<VarDefinition> VarDefinitions;
421 std::vector<unsigned> CtxIndices;
422 std::vector<std::pair<Stmt *, Context>> SavedContexts;
426 // index 0 is a placeholder for undefined variables (aka phi-nodes).
427 VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
430 /// Look up a definition, within the given context.
431 const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
432 const unsigned *i = Ctx.lookup(D);
435 assert(*i < VarDefinitions.size());
436 return &VarDefinitions[*i];
439 /// Look up the definition for D within the given context. Returns
440 /// NULL if the expression is not statically known. If successful, also
441 /// modifies Ctx to hold the context of the returned Expr.
442 const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
443 const unsigned *P = Ctx.lookup(D);
449 if (VarDefinitions[i].Exp) {
450 Ctx = VarDefinitions[i].Ctx;
451 return VarDefinitions[i].Exp;
453 i = VarDefinitions[i].Ref;
458 Context getEmptyContext() { return ContextFactory.getEmptyMap(); }
460 /// Return the next context after processing S. This function is used by
461 /// clients of the class to get the appropriate context when traversing the
462 /// CFG. It must be called for every assignment or DeclStmt.
463 Context getNextContext(unsigned &CtxIndex, Stmt *S, Context C) {
464 if (SavedContexts[CtxIndex+1].first == S) {
466 Context Result = SavedContexts[CtxIndex].second;
472 void dumpVarDefinitionName(unsigned i) {
474 llvm::errs() << "Undefined";
477 const NamedDecl *Dec = VarDefinitions[i].Dec;
479 llvm::errs() << "<<NULL>>";
482 Dec->printName(llvm::errs());
483 llvm::errs() << "." << i << " " << ((const void*) Dec);
486 /// Dumps an ASCII representation of the variable map to llvm::errs()
488 for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
489 const Expr *Exp = VarDefinitions[i].Exp;
490 unsigned Ref = VarDefinitions[i].Ref;
492 dumpVarDefinitionName(i);
493 llvm::errs() << " = ";
494 if (Exp) Exp->dump();
496 dumpVarDefinitionName(Ref);
497 llvm::errs() << "\n";
502 /// Dumps an ASCII representation of a Context to llvm::errs()
503 void dumpContext(Context C) {
504 for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
505 const NamedDecl *D = I.getKey();
506 D->printName(llvm::errs());
507 const unsigned *i = C.lookup(D);
508 llvm::errs() << " -> ";
509 dumpVarDefinitionName(*i);
510 llvm::errs() << "\n";
514 /// Builds the variable map.
515 void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
516 std::vector<CFGBlockInfo> &BlockInfo);
519 friend class VarMapBuilder;
521 // Get the current context index
522 unsigned getContextIndex() { return SavedContexts.size()-1; }
524 // Save the current context for later replay
525 void saveContext(Stmt *S, Context C) {
526 SavedContexts.push_back(std::make_pair(S, C));
529 // Adds a new definition to the given context, and returns a new context.
530 // This method should be called when declaring a new variable.
531 Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
532 assert(!Ctx.contains(D));
533 unsigned newID = VarDefinitions.size();
534 Context NewCtx = ContextFactory.add(Ctx, D, newID);
535 VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
539 // Add a new reference to an existing definition.
540 Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
541 unsigned newID = VarDefinitions.size();
542 Context NewCtx = ContextFactory.add(Ctx, D, newID);
543 VarDefinitions.push_back(VarDefinition(D, i, Ctx));
547 // Updates a definition only if that definition is already in the map.
548 // This method should be called when assigning to an existing variable.
549 Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
550 if (Ctx.contains(D)) {
551 unsigned newID = VarDefinitions.size();
552 Context NewCtx = ContextFactory.remove(Ctx, D);
553 NewCtx = ContextFactory.add(NewCtx, D, newID);
554 VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
560 // Removes a definition from the context, but keeps the variable name
561 // as a valid variable. The index 0 is a placeholder for cleared definitions.
562 Context clearDefinition(const NamedDecl *D, Context Ctx) {
563 Context NewCtx = Ctx;
564 if (NewCtx.contains(D)) {
565 NewCtx = ContextFactory.remove(NewCtx, D);
566 NewCtx = ContextFactory.add(NewCtx, D, 0);
571 // Remove a definition entirely from the context.
572 Context removeDefinition(const NamedDecl *D, Context Ctx) {
573 Context NewCtx = Ctx;
574 if (NewCtx.contains(D)) {
575 NewCtx = ContextFactory.remove(NewCtx, D);
580 Context intersectContexts(Context C1, Context C2);
581 Context createReferenceContext(Context C);
582 void intersectBackEdge(Context C1, Context C2);
587 // This has to be defined after LocalVariableMap.
588 CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
589 return CFGBlockInfo(M.getEmptyContext());
594 /// Visitor which builds a LocalVariableMap
595 class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
597 LocalVariableMap* VMap;
598 LocalVariableMap::Context Ctx;
600 VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
601 : VMap(VM), Ctx(C) {}
603 void VisitDeclStmt(DeclStmt *S);
604 void VisitBinaryOperator(BinaryOperator *BO);
609 // Add new local variables to the variable map
610 void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
611 bool modifiedCtx = false;
612 DeclGroupRef DGrp = S->getDeclGroup();
613 for (const auto *D : DGrp) {
614 if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
615 const Expr *E = VD->getInit();
617 // Add local variables with trivial type to the variable map
618 QualType T = VD->getType();
619 if (T.isTrivialType(VD->getASTContext())) {
620 Ctx = VMap->addDefinition(VD, E, Ctx);
626 VMap->saveContext(S, Ctx);
629 // Update local variable definitions in variable map
630 void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
631 if (!BO->isAssignmentOp())
634 Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
636 // Update the variable map and current context.
637 if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
638 const ValueDecl *VDec = DRE->getDecl();
639 if (Ctx.lookup(VDec)) {
640 if (BO->getOpcode() == BO_Assign)
641 Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
643 // FIXME -- handle compound assignment operators
644 Ctx = VMap->clearDefinition(VDec, Ctx);
645 VMap->saveContext(BO, Ctx);
650 // Computes the intersection of two contexts. The intersection is the
651 // set of variables which have the same definition in both contexts;
652 // variables with different definitions are discarded.
653 LocalVariableMap::Context
654 LocalVariableMap::intersectContexts(Context C1, Context C2) {
656 for (const auto &P : C1) {
657 const NamedDecl *Dec = P.first;
658 const unsigned *i2 = C2.lookup(Dec);
659 if (!i2) // variable doesn't exist on second path
660 Result = removeDefinition(Dec, Result);
661 else if (*i2 != P.second) // variable exists, but has different definition
662 Result = clearDefinition(Dec, Result);
667 // For every variable in C, create a new variable that refers to the
668 // definition in C. Return a new context that contains these new variables.
669 // (We use this for a naive implementation of SSA on loop back-edges.)
670 LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
671 Context Result = getEmptyContext();
672 for (const auto &P : C)
673 Result = addReference(P.first, P.second, Result);
677 // This routine also takes the intersection of C1 and C2, but it does so by
678 // altering the VarDefinitions. C1 must be the result of an earlier call to
679 // createReferenceContext.
680 void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
681 for (const auto &P : C1) {
682 unsigned i1 = P.second;
683 VarDefinition *VDef = &VarDefinitions[i1];
684 assert(VDef->isReference());
686 const unsigned *i2 = C2.lookup(P.first);
687 if (!i2 || (*i2 != i1))
688 VDef->Ref = 0; // Mark this variable as undefined
692 // Traverse the CFG in topological order, so all predecessors of a block
693 // (excluding back-edges) are visited before the block itself. At
694 // each point in the code, we calculate a Context, which holds the set of
695 // variable definitions which are visible at that point in execution.
696 // Visible variables are mapped to their definitions using an array that
697 // contains all definitions.
699 // At join points in the CFG, the set is computed as the intersection of
700 // the incoming sets along each edge, E.g.
702 // { Context | VarDefinitions }
703 // int x = 0; { x -> x1 | x1 = 0 }
704 // int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
705 // if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... }
706 // else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... }
707 // ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... }
709 // This is essentially a simpler and more naive version of the standard SSA
710 // algorithm. Those definitions that remain in the intersection are from blocks
711 // that strictly dominate the current block. We do not bother to insert proper
712 // phi nodes, because they are not used in our analysis; instead, wherever
713 // a phi node would be required, we simply remove that definition from the
714 // context (E.g. x above).
716 // The initial traversal does not capture back-edges, so those need to be
717 // handled on a separate pass. Whenever the first pass encounters an
718 // incoming back edge, it duplicates the context, creating new definitions
719 // that refer back to the originals. (These correspond to places where SSA
720 // might have to insert a phi node.) On the second pass, these definitions are
721 // set to NULL if the variable has changed on the back-edge (i.e. a phi
722 // node was actually required.) E.g.
724 // { Context | VarDefinitions }
725 // int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
726 // while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
727 // x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... }
728 // ... { y -> y1 | x3 = 2, x2 = 1, ... }
729 void LocalVariableMap::traverseCFG(CFG *CFGraph,
730 const PostOrderCFGView *SortedGraph,
731 std::vector<CFGBlockInfo> &BlockInfo) {
732 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
734 CtxIndices.resize(CFGraph->getNumBlockIDs());
736 for (const auto *CurrBlock : *SortedGraph) {
737 int CurrBlockID = CurrBlock->getBlockID();
738 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
740 VisitedBlocks.insert(CurrBlock);
742 // Calculate the entry context for the current block
743 bool HasBackEdges = false;
745 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
746 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
747 // If *PI -> CurrBlock is a back edge, skip it.
748 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
753 int PrevBlockID = (*PI)->getBlockID();
754 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
757 CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
761 CurrBlockInfo->EntryContext =
762 intersectContexts(CurrBlockInfo->EntryContext,
763 PrevBlockInfo->ExitContext);
767 // Duplicate the context if we have back-edges, so we can call
768 // intersectBackEdge later.
770 CurrBlockInfo->EntryContext =
771 createReferenceContext(CurrBlockInfo->EntryContext);
773 // Create a starting context index for the current block
774 saveContext(nullptr, CurrBlockInfo->EntryContext);
775 CurrBlockInfo->EntryIndex = getContextIndex();
777 // Visit all the statements in the basic block.
778 VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
779 for (const auto &BI : *CurrBlock) {
780 switch (BI.getKind()) {
781 case CFGElement::Statement: {
782 CFGStmt CS = BI.castAs<CFGStmt>();
783 VMapBuilder.Visit(const_cast<Stmt *>(CS.getStmt()));
790 CurrBlockInfo->ExitContext = VMapBuilder.Ctx;
792 // Mark variables on back edges as "unknown" if they've been changed.
793 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
794 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
795 // If CurrBlock -> *SI is *not* a back edge, skip it.
796 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
799 CFGBlock *FirstLoopBlock = *SI;
800 Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
801 Context LoopEnd = CurrBlockInfo->ExitContext;
802 intersectBackEdge(LoopBegin, LoopEnd);
806 // Put an extra entry at the end of the indexed context array
807 unsigned exitID = CFGraph->getExit().getBlockID();
808 saveContext(nullptr, BlockInfo[exitID].ExitContext);
811 /// Find the appropriate source locations to use when producing diagnostics for
812 /// each block in the CFG.
813 static void findBlockLocations(CFG *CFGraph,
814 const PostOrderCFGView *SortedGraph,
815 std::vector<CFGBlockInfo> &BlockInfo) {
816 for (const auto *CurrBlock : *SortedGraph) {
817 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];
819 // Find the source location of the last statement in the block, if the
820 // block is not empty.
821 if (const Stmt *S = CurrBlock->getTerminator()) {
822 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getLocStart();
824 for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
825 BE = CurrBlock->rend(); BI != BE; ++BI) {
826 // FIXME: Handle other CFGElement kinds.
827 if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
828 CurrBlockInfo->ExitLoc = CS->getStmt()->getLocStart();
834 if (CurrBlockInfo->ExitLoc.isValid()) {
835 // This block contains at least one statement. Find the source location
836 // of the first statement in the block.
837 for (const auto &BI : *CurrBlock) {
838 // FIXME: Handle other CFGElement kinds.
839 if (Optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
840 CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
844 } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
845 CurrBlock != &CFGraph->getExit()) {
846 // The block is empty, and has a single predecessor. Use its exit location.
848 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
849 BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
856 class LockableFactEntry : public FactEntry {
858 /// managed by ScopedLockable object
862 LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
863 bool Mng = false, bool Asrt = false)
864 : FactEntry(CE, LK, Loc, Asrt), Managed(Mng) {}
867 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
868 SourceLocation JoinLoc, LockErrorKind LEK,
869 ThreadSafetyHandler &Handler) const override {
870 if (!Managed && !asserted() && !negative() && !isUniversal()) {
871 Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
876 void handleUnlock(FactSet &FSet, FactManager &FactMan,
877 const CapabilityExpr &Cp, SourceLocation UnlockLoc,
878 bool FullyRemove, ThreadSafetyHandler &Handler,
879 StringRef DiagKind) const override {
880 FSet.removeLock(FactMan, Cp);
881 if (!Cp.negative()) {
882 FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
883 !Cp, LK_Exclusive, UnlockLoc));
888 class ScopedLockableFactEntry : public FactEntry {
890 SmallVector<const til::SExpr *, 4> UnderlyingMutexes;
893 ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
894 const CapExprSet &Excl, const CapExprSet &Shrd)
895 : FactEntry(CE, LK_Exclusive, Loc, false) {
896 for (const auto &M : Excl)
897 UnderlyingMutexes.push_back(M.sexpr());
898 for (const auto &M : Shrd)
899 UnderlyingMutexes.push_back(M.sexpr());
903 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
904 SourceLocation JoinLoc, LockErrorKind LEK,
905 ThreadSafetyHandler &Handler) const override {
906 for (const auto *UnderlyingMutex : UnderlyingMutexes) {
907 if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
908 // If this scoped lock manages another mutex, and if the underlying
909 // mutex is still held, then warn about the underlying mutex.
910 Handler.handleMutexHeldEndOfScope(
911 "mutex", sx::toString(UnderlyingMutex), loc(), JoinLoc, LEK);
916 void handleUnlock(FactSet &FSet, FactManager &FactMan,
917 const CapabilityExpr &Cp, SourceLocation UnlockLoc,
918 bool FullyRemove, ThreadSafetyHandler &Handler,
919 StringRef DiagKind) const override {
920 assert(!Cp.negative() && "Managing object cannot be negative.");
921 for (const auto *UnderlyingMutex : UnderlyingMutexes) {
922 CapabilityExpr UnderCp(UnderlyingMutex, false);
923 auto UnderEntry = llvm::make_unique<LockableFactEntry>(
924 !UnderCp, LK_Exclusive, UnlockLoc);
927 // We're destroying the managing object.
928 // Remove the underlying mutex if it exists; but don't warn.
929 if (FSet.findLock(FactMan, UnderCp)) {
930 FSet.removeLock(FactMan, UnderCp);
931 FSet.addLock(FactMan, std::move(UnderEntry));
934 // We're releasing the underlying mutex, but not destroying the
935 // managing object. Warn on dual release.
936 if (!FSet.findLock(FactMan, UnderCp)) {
937 Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(),
940 FSet.removeLock(FactMan, UnderCp);
941 FSet.addLock(FactMan, std::move(UnderEntry));
945 FSet.removeLock(FactMan, Cp);
949 /// Class which implements the core thread safety analysis routines.
950 class ThreadSafetyAnalyzer {
951 friend class BuildLockset;
952 friend class threadSafety::BeforeSet;
954 llvm::BumpPtrAllocator Bpa;
955 threadSafety::til::MemRegionRef Arena;
956 threadSafety::SExprBuilder SxBuilder;
958 ThreadSafetyHandler &Handler;
959 const CXXMethodDecl *CurrentMethod;
960 LocalVariableMap LocalVarMap;
962 std::vector<CFGBlockInfo> BlockInfo;
964 BeforeSet *GlobalBeforeSet;
967 ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset)
968 : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}
970 bool inCurrentScope(const CapabilityExpr &CapE);
972 void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
973 StringRef DiagKind, bool ReqAttr = false);
974 void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
975 SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
978 template <typename AttrType>
979 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, Expr *Exp,
980 const NamedDecl *D, VarDecl *SelfDecl = nullptr);
982 template <class AttrType>
983 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, Expr *Exp,
985 const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
986 Expr *BrE, bool Neg);
988 const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
991 void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
992 const CFGBlock* PredBlock,
993 const CFGBlock *CurrBlock);
995 void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
996 SourceLocation JoinLoc,
997 LockErrorKind LEK1, LockErrorKind LEK2,
1000 void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
1001 SourceLocation JoinLoc, LockErrorKind LEK1,
1003 intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1, Modify);
1006 void runAnalysis(AnalysisDeclContext &AC);
1011 /// Process acquired_before and acquired_after attributes on Vd.
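/// For example (illustrative, using the macro spellings from the public docs):
///
///   Mutex Mu1;
///   Mutex Mu2 ACQUIRED_AFTER(Mu1);   // Mu1 must be acquired before Mu2
///
/// An acquired_after attribute on Mu2 is recorded by appending Mu2 to the
/// before-vector of Mu1, so both attribute spellings feed the same
/// "acquired before" graph.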
1012 BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
1013 ThreadSafetyAnalyzer& Analyzer) {
1014 // Create a new entry for Vd.
1015 BeforeInfo *Info = nullptr;
1017 // Keep InfoPtr in its own scope in case BMap is modified later and the
1018 // reference becomes invalid.
1019 std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
1021 InfoPtr.reset(new BeforeInfo());
1022 Info = InfoPtr.get();
1025 for (const auto *At : Vd->attrs()) {
1026 switch (At->getKind()) {
1027 case attr::AcquiredBefore: {
1028 const auto *A = cast<AcquiredBeforeAttr>(At);
1030 // Read exprs from the attribute, and add them to BeforeVect.
1031 for (const auto *Arg : A->args()) {
1033 Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
1034 if (const ValueDecl *Cpvd = Cp.valueDecl()) {
1035 Info->Vect.push_back(Cpvd);
1036 const auto It = BMap.find(Cpvd);
1037 if (It == BMap.end())
1038 insertAttrExprs(Cpvd, Analyzer);
1043 case attr::AcquiredAfter: {
1044 const auto *A = cast<AcquiredAfterAttr>(At);
1046 // Read exprs from the attribute, and add them to BeforeVect.
1047 for (const auto *Arg : A->args()) {
1049 Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
1050 if (const ValueDecl *ArgVd = Cp.valueDecl()) {
1051 // Get entry for mutex listed in attribute
1052 BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
1053 ArgInfo->Vect.push_back(Vd);
1066 BeforeSet::BeforeInfo *
1067 BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
1068 ThreadSafetyAnalyzer &Analyzer) {
1069 auto It = BMap.find(Vd);
1070 BeforeInfo *Info = nullptr;
1071 if (It == BMap.end())
1072 Info = insertAttrExprs(Vd, Analyzer);
1074 Info = It->second.get();
1075 assert(Info && "BMap contained nullptr?");
1079 /// Warn if any mutexes in FSet are in the acquired_before set of StartVd.
1080 void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
1081 const FactSet& FSet,
1082 ThreadSafetyAnalyzer& Analyzer,
1083 SourceLocation Loc, StringRef CapKind) {
1084 SmallVector<BeforeInfo*, 8> InfoVect;
1086 // Do a depth-first traversal of Vd.
1087 // Return true if there are cycles.
1088 std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
1092 BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);
1094 if (Info->Visited == 1)
1097 if (Info->Visited == 2)
1100 if (Info->Vect.empty())
1103 InfoVect.push_back(Info);
1105 for (const auto *Vdb : Info->Vect) {
1106 // Exclude mutexes in our immediate before set.
1107 if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
1108 StringRef L1 = StartVd->getName();
1109 StringRef L2 = Vdb->getName();
1110 Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
1112 // Transitively search other before sets, and warn on cycles.
1113 if (traverse(Vdb)) {
1114 if (CycMap.find(Vd) == CycMap.end()) {
1115 CycMap.insert(std::make_pair(Vd, true));
1116 StringRef L1 = Vd->getName();
1117 Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
1127 for (auto *Info : InfoVect)
1131 /// Gets the value decl pointer from DeclRefExprs or MemberExprs.
1132 static const ValueDecl *getValueDecl(const Expr *Exp) {
1133 if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
1134 return getValueDecl(CE->getSubExpr());
1136 if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
1137 return DR->getDecl();
1139 if (const auto *ME = dyn_cast<MemberExpr>(Exp))
1140 return ME->getMemberDecl();
1147 template <typename Ty>
1148 class has_arg_iterator_range {
1149 using yes = char[1];
1152 template <typename Inner>
1153 static yes& test(Inner *I, decltype(I->args()) * = nullptr);
1156 static no& test(...);
1159 static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
1164 static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
1165 return A->getName();
1168 static StringRef ClassifyDiagnostic(QualType VDT) {
1169 // We need to look at the declaration of the type of the value to determine
1170 // which it is. The type should either be a record or a typedef, or a pointer
1171 // or reference thereof.
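  // For example (illustrative): values whose type is declared as
  //   class CAPABILITY("role") RoleType { ... };
  // (or pointers/references to it) classify their diagnostics as "role",
  // while types without a CapabilityAttr fall back to the default "mutex".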
1172 if (const auto *RT = VDT->getAs<RecordType>()) {
1173 if (const auto *RD = RT->getDecl())
1174 if (const auto *CA = RD->getAttr<CapabilityAttr>())
1175 return ClassifyDiagnostic(CA);
1176 } else if (const auto *TT = VDT->getAs<TypedefType>()) {
1177 if (const auto *TD = TT->getDecl())
1178 if (const auto *CA = TD->getAttr<CapabilityAttr>())
1179 return ClassifyDiagnostic(CA);
1180 } else if (VDT->isPointerType() || VDT->isReferenceType())
1181 return ClassifyDiagnostic(VDT->getPointeeType());
1186 static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
1187 assert(VD && "No ValueDecl passed");
1189 // The ValueDecl is the declaration of a mutex or role (hopefully).
1190 return ClassifyDiagnostic(VD->getType());
1193 template <typename AttrTy>
1194 static typename std::enable_if<!has_arg_iterator_range<AttrTy>::value,
1196 ClassifyDiagnostic(const AttrTy *A) {
1197 if (const ValueDecl *VD = getValueDecl(A->getArg()))
1198 return ClassifyDiagnostic(VD);
1202 template <typename AttrTy>
1203 static typename std::enable_if<has_arg_iterator_range<AttrTy>::value,
1205 ClassifyDiagnostic(const AttrTy *A) {
1206 for (const auto *Arg : A->args()) {
1207 if (const ValueDecl *VD = getValueDecl(Arg))
1208 return ClassifyDiagnostic(VD);
1213 bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
1216 if (const auto *P = dyn_cast_or_null<til::Project>(CapE.sexpr())) {
1217 const auto *VD = P->clangDecl();
1219 return VD->getDeclContext() == CurrentMethod->getDeclContext();
1224 /// Add a new lock to the lockset, warning if the lock is already there.
1225 /// \param ReqAttr -- true if this is part of an initial Requires attribute.
1226 void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
1227 std::unique_ptr<FactEntry> Entry,
1228 StringRef DiagKind, bool ReqAttr) {
1229 if (Entry->shouldIgnore())
1232 if (!ReqAttr && !Entry->negative()) {
1233 // look for the negative capability, and remove it from the fact set.
1234 CapabilityExpr NegC = !*Entry;
1235 FactEntry *Nen = FSet.findLock(FactMan, NegC);
1237 FSet.removeLock(FactMan, NegC);
1240 if (inCurrentScope(*Entry) && !Entry->asserted())
1241 Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
1242 NegC.toString(), Entry->loc());
1246 // Check before/after constraints
1247 if (Handler.issueBetaWarnings() &&
1248 !Entry->asserted() && !Entry->declared()) {
1249 GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
1250 Entry->loc(), DiagKind);
1253 // FIXME: Don't always warn when we have support for reentrant locks.
1254 if (FSet.findLock(FactMan, *Entry)) {
1255 if (!Entry->asserted())
1256 Handler.handleDoubleLock(DiagKind, Entry->toString(), Entry->loc());
1258 FSet.addLock(FactMan, std::move(Entry));
1262 /// Remove a lock from the lockset, warning if the lock is not there.
1263 /// \param UnlockLoc The source location of the unlock (only used in error msg)
1264 void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
1265 SourceLocation UnlockLoc,
1266 bool FullyRemove, LockKind ReceivedKind,
1267 StringRef DiagKind) {
1268 if (Cp.shouldIgnore())
1271 const FactEntry *LDat = FSet.findLock(FactMan, Cp);
1273 Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc);
1277 // Generic lock removal doesn't care about lock kind mismatches, but
1278 // otherwise diagnose when the lock kinds are mismatched.
1279 if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
1280 Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(),
1281 LDat->kind(), ReceivedKind, UnlockLoc);
1284 LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
1288 /// Extract the list of mutexIDs from the attribute on an expression,
1289 /// and push them onto Mtxs, discarding any duplicates.
1290 template <typename AttrType>
1291 void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
1292 Expr *Exp, const NamedDecl *D,
1293 VarDecl *SelfDecl) {
1294 if (Attr->args_size() == 0) {
1295 // The mutex held is the "this" object.
1296 CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
1297 if (Cp.isInvalid()) {
1298 warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1302 if (!Cp.shouldIgnore())
1303 Mtxs.push_back_nodup(Cp);
1307 for (const auto *Arg : Attr->args()) {
1308 CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
1309 if (Cp.isInvalid()) {
1310 warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
1314 if (!Cp.shouldIgnore())
1315 Mtxs.push_back_nodup(Cp);
1319 /// Extract the list of mutexIDs from a trylock attribute. If the
1320 /// trylock applies to the given edge, then push them onto Mtxs, discarding any duplicates.
1322 template <class AttrType>
1323 void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
1324 Expr *Exp, const NamedDecl *D,
1325 const CFGBlock *PredBlock,
1326 const CFGBlock *CurrBlock,
1327 Expr *BrE, bool Neg) {
1328 // Find out which branch has the lock
1329 bool branch = false;
1330 if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
1331 branch = BLE->getValue();
1332 else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
1333 branch = ILE->getValue().getBoolValue();
1335 int branchnum = branch ? 0 : 1;
1337 branchnum = !branchnum;
1339 // If we've taken the trylock branch, then add the lock
1341 for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
1342 SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
1343 if (*SI == CurrBlock && i == branchnum)
1344 getMutexIDs(Mtxs, Attr, Exp, D);
1348 static bool getStaticBooleanValue(Expr *E, bool &TCond) {
1349 if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
1352 } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
1353 TCond = BLE->getValue();
1355 } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
1356 TCond = ILE->getValue().getBoolValue();
1358 } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
1359 return getStaticBooleanValue(CE->getSubExpr(), TCond);
1363 // If Cond can be traced back to a function call, return the call expression.
1364 // Negate should be initialized to false by the caller, and will be set to true
1365 // if the function call is negated, e.g. if (!mu.tryLock(...))
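// Illustrative patterns this handles (names are hypothetical):
//   if (mu.TryLock()) ...        // returns the TryLock call, Negate == false
//   if (!mu.TryLock()) ...       // returns the TryLock call, Negate == true
//   bool ok = mu.TryLock();
//   if (ok) ...                  // found via the LocalVariableMap lookup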
1366 const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
1372 if (const auto *CallExp = dyn_cast<CallExpr>(Cond))
1374 else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
1375 return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
1376 else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
1377 return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
1378 else if (const auto *EWC = dyn_cast<ExprWithCleanups>(Cond))
1379 return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
1380 else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
1381 const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
1382 return getTrylockCallExpr(E, C, Negate);
1384 else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
1385 if (UOP->getOpcode() == UO_LNot) {
1387 return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
1391 else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
1392 if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
1393 if (BOP->getOpcode() == BO_NE)
1397 if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
1398 if (!TCond) Negate = !Negate;
1399 return getTrylockCallExpr(BOP->getLHS(), C, Negate);
1402 if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
1403 if (!TCond) Negate = !Negate;
1404 return getTrylockCallExpr(BOP->getRHS(), C, Negate);
1408 if (BOP->getOpcode() == BO_LAnd) {
1409 // LHS must have been evaluated in a different block.
1410 return getTrylockCallExpr(BOP->getRHS(), C, Negate);
1412 if (BOP->getOpcode() == BO_LOr)
1413 return getTrylockCallExpr(BOP->getRHS(), C, Negate);
1419 /// Find the lockset that holds on the edge between PredBlock
1420 /// and CurrBlock. The edge set is the exit set of PredBlock (passed
1421 /// as the ExitSet parameter) plus any trylocks, which are conditionally held.
1422 void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
1423 const FactSet &ExitSet,
1424 const CFGBlock *PredBlock,
1425 const CFGBlock *CurrBlock) {
1428 const Stmt *Cond = PredBlock->getTerminatorCondition();
1432 bool Negate = false;
1433 const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
1434 const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
1435 StringRef CapDiagKind = "mutex";
1437 auto *Exp = const_cast<CallExpr *>(getTrylockCallExpr(Cond, LVarCtx, Negate));
1441 auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
1442 if(!FunDecl || !FunDecl->hasAttrs())
1445 CapExprSet ExclusiveLocksToAdd;
1446 CapExprSet SharedLocksToAdd;
1448 // If the condition is a call to a Trylock function, then grab the attributes
1449 for (const auto *Attr : FunDecl->attrs()) {
1450 switch (Attr->getKind()) {
1451 case attr::TryAcquireCapability: {
1452 auto *A = cast<TryAcquireCapabilityAttr>(Attr);
1453 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
1454 Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(),
1456 CapDiagKind = ClassifyDiagnostic(A);
1459 case attr::ExclusiveTrylockFunction: {
1460 const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr);
1461 getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
1462 PredBlock, CurrBlock, A->getSuccessValue(), Negate);
1463 CapDiagKind = ClassifyDiagnostic(A);
1466 case attr::SharedTrylockFunction: {
1467 const auto *A = cast<SharedTrylockFunctionAttr>(Attr);
1468 getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
1469 PredBlock, CurrBlock, A->getSuccessValue(), Negate);
1470 CapDiagKind = ClassifyDiagnostic(A);
1478 // Add and remove locks.
1479 SourceLocation Loc = Exp->getExprLoc();
1480 for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
1481 addLock(Result, llvm::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
1484 for (const auto &SharedLockToAdd : SharedLocksToAdd)
1485 addLock(Result, llvm::make_unique<LockableFactEntry>(SharedLockToAdd,
1492 /// We use this class to visit different types of expressions in
1493 /// CFGBlocks, and build up the lockset.
1494 /// An expression may cause us to add or remove locks from the lockset, or else
1495 /// output error messages related to missing locks.
1496 /// FIXME: In future, we may be able to not inherit from a visitor.
1497 class BuildLockset : public StmtVisitor<BuildLockset> {
1498 friend class ThreadSafetyAnalyzer;
1500 ThreadSafetyAnalyzer *Analyzer;
1502 LocalVariableMap::Context LVarCtx;
1506 void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
1507 Expr *MutexExp, ProtectedOperationKind POK,
1508 StringRef DiagKind, SourceLocation Loc);
1509 void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
1510 StringRef DiagKind);
1512 void checkAccess(const Expr *Exp, AccessKind AK,
1513 ProtectedOperationKind POK = POK_VarAccess);
1514 void checkPtAccess(const Expr *Exp, AccessKind AK,
1515 ProtectedOperationKind POK = POK_VarAccess);
1517 void handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);
1520 BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
1521 : StmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
1522 LVarCtx(Info.EntryContext), CtxIndex(Info.EntryIndex) {}
1524 void VisitUnaryOperator(UnaryOperator *UO);
1525 void VisitBinaryOperator(BinaryOperator *BO);
1526 void VisitCastExpr(CastExpr *CE);
1527 void VisitCallExpr(CallExpr *Exp);
1528 void VisitCXXConstructExpr(CXXConstructExpr *Exp);
1529 void VisitDeclStmt(DeclStmt *S);
1534 /// Warn if the LSet does not contain a lock sufficient to protect access
1535 /// of at least the passed in AccessKind.
1536 void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
1537 AccessKind AK, Expr *MutexExp,
1538 ProtectedOperationKind POK,
1539 StringRef DiagKind, SourceLocation Loc) {
1540 LockKind LK = getLockKindFromAccessKind(AK);
1542 CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
1543 if (Cp.isInvalid()) {
1544 warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
1546 } else if (Cp.shouldIgnore()) {
1550 if (Cp.negative()) {
1551 // Negative capabilities act like locks excluded
1552 FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
1554 Analyzer->Handler.handleFunExcludesLock(
1555 DiagKind, D->getNameAsString(), (!Cp).toString(), Loc);
1559 // If this does not refer to a negative capability in the same class,
1560 // then don't issue a warning.
1561 if (!Analyzer->inCurrentScope(Cp))
1564 // Otherwise the negative requirement must be propagated to the caller.
1565 LDat = FSet.findLock(Analyzer->FactMan, Cp);
1567 Analyzer->Handler.handleMutexNotHeld("", D, POK, Cp.toString(),
1573 FactEntry* LDat = FSet.findLockUniv(Analyzer->FactMan, Cp);
1574 bool NoError = true;
1576 // No exact match found. Look for a partial match.
1577 LDat = FSet.findPartialMatch(Analyzer->FactMan, Cp);
1579 // Warn that there's no precise match.
1580 std::string PartMatchStr = LDat->toString();
1581 StringRef PartMatchName(PartMatchStr);
1582 Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
1583 LK, Loc, &PartMatchName);
1585 // Warn that there's no match at all.
1586 Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
1591 // Make sure the mutex we found is the right kind.
1592 if (NoError && LDat && !LDat->isAtLeast(LK)) {
1593 Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
1598 /// Warn if the LSet contains the given lock.
1599 void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
1600 Expr *MutexExp, StringRef DiagKind) {
1601 CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
1602 if (Cp.isInvalid()) {
1603 warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
1605 } else if (Cp.shouldIgnore()) {
1609 FactEntry* LDat = FSet.findLock(Analyzer->FactMan, Cp);
1611 Analyzer->Handler.handleFunExcludesLock(
1612 DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc());
1616 /// Checks guarded_by and pt_guarded_by attributes.
1617 /// Whenever we identify an access (read or write) to a DeclRefExpr that is
1618 /// marked with guarded_by, we must ensure the appropriate mutexes are held.
1619 /// Similarly, we check if the access is to an expression that dereferences
1620 /// a pointer marked with pt_guarded_by.
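/// For example (illustrative):
///   Mutex Mu;
///   int A GUARDED_BY(Mu);       // accesses to A must hold Mu
///   int *P PT_GUARDED_BY(Mu);   // accesses to *P must hold Mu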
1621 void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
1622 ProtectedOperationKind POK) {
1623 Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();
1625 SourceLocation Loc = Exp->getExprLoc();
1627 // Local variables of reference type cannot be re-assigned;
1628 // map them to their initializer.
1629 while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
1630 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
1631 if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
1632 if (const auto *E = VD->getInit()) {
1640 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
1642 if (UO->getOpcode() == UO_Deref)
1643 checkPtAccess(UO->getSubExpr(), AK, POK);
1647 if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
1648 checkPtAccess(AE->getLHS(), AK, POK);
1652 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
1654 checkPtAccess(ME->getBase(), AK, POK);
1656 checkAccess(ME->getBase(), AK, POK);
1659 const ValueDecl *D = getValueDecl(Exp);
1660 if (!D || !D->hasAttrs())
1663 if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) {
1664 Analyzer->Handler.handleNoMutexHeld("mutex", D, POK, AK, Loc);
1667 for (const auto *I : D->specific_attrs<GuardedByAttr>())
1668 warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK,
1669 ClassifyDiagnostic(I), Loc);
1672 /// Checks pt_guarded_by and pt_guarded_var attributes.
1673 /// POK is the same operationKind that was passed to checkAccess.
1674 void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
1675 ProtectedOperationKind POK) {
1677 if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
1678 Exp = PE->getSubExpr();
1681 if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
1682 if (CE->getCastKind() == CK_ArrayToPointerDecay) {
1683 // If it's an actual array, and not a pointer, then its elements
1684 // are protected by GUARDED_BY, not PT_GUARDED_BY.
1685 checkAccess(CE->getSubExpr(), AK, POK);
1688 Exp = CE->getSubExpr();
1694 // Pass by reference warnings are under a different flag.
1695 ProtectedOperationKind PtPOK = POK_VarDereference;
1696 if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef;
1698 const ValueDecl *D = getValueDecl(Exp);
1699 if (!D || !D->hasAttrs())
1702 if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan))
1703 Analyzer->Handler.handleNoMutexHeld("mutex", D, PtPOK, AK,
1706 for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
1707 warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK,
1708 ClassifyDiagnostic(I), Exp->getExprLoc());
1711 /// Process a function call, method call, constructor call,
1712 /// or destructor call. This involves looking at the attributes on the
1713 /// corresponding function/method/constructor/destructor, issuing warnings,
1714 /// and updating the locksets accordingly.
1716 /// FIXME: For classes annotated with one of the guarded annotations, we need
1717 /// to treat const method calls as reads and non-const method calls as writes,
1718 /// and check that the appropriate locks are held. Non-const method calls with
1719 /// the same signature as const method calls can be also treated as reads.
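/// For example (illustrative, with the macro spellings from the public docs):
/// a call to a function annotated ACQUIRE(Mu) adds Mu to the lockset, a call
/// annotated RELEASE(Mu) removes it, and constructing a scoped-lockable RAII
/// object such as
///   MutexLocker Lock(&Mu);   // class marked SCOPED_CAPABILITY
/// records the managing object so that Mu is released when Lock is destroyed.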
1721 void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
1722 SourceLocation Loc = Exp->getExprLoc();
1723 CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
1724 CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
1725 CapExprSet ScopedExclusiveReqs, ScopedSharedReqs;
1726 StringRef CapDiagKind = "mutex";
1728 // Figure out if we're constructing an object of scoped lockable class
1729 bool isScopedVar = false;
1731 if (const auto *CD = dyn_cast<const CXXConstructorDecl>(D)) {
1732 const CXXRecordDecl* PD = CD->getParent();
1733 if (PD && PD->hasAttr<ScopedLockableAttr>())
1738 for(const Attr *At : D->attrs()) {
1739 switch (At->getKind()) {
1740 // When we encounter a lock function, we need to add the lock to our lockset.
1742 case attr::AcquireCapability: {
1743 const auto *A = cast<AcquireCapabilityAttr>(At);
1744 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
1745 : ExclusiveLocksToAdd,
1748 CapDiagKind = ClassifyDiagnostic(A);
1752 // An assert will add a lock to the lockset, but will not generate
1753 // a warning if it is already there, and will not generate a warning
1754 // if it is not removed.
1755 case attr::AssertExclusiveLock: {
1756 const auto *A = cast<AssertExclusiveLockAttr>(At);
1758 CapExprSet AssertLocks;
1759 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
1760 for (const auto &AssertLock : AssertLocks)
1761 Analyzer->addLock(FSet,
1762 llvm::make_unique<LockableFactEntry>(
1763 AssertLock, LK_Exclusive, Loc, false, true),
1764 ClassifyDiagnostic(A));
1767 case attr::AssertSharedLock: {
1768 const auto *A = cast<AssertSharedLockAttr>(At);
1770 CapExprSet AssertLocks;
1771 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
1772 for (const auto &AssertLock : AssertLocks)
1773 Analyzer->addLock(FSet,
1774 llvm::make_unique<LockableFactEntry>(
1775 AssertLock, LK_Shared, Loc, false, true),
1776 ClassifyDiagnostic(A));
1780 case attr::AssertCapability: {
1781 const auto *A = cast<AssertCapabilityAttr>(At);
1782 CapExprSet AssertLocks;
1783 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
1784 for (const auto &AssertLock : AssertLocks)
1785 Analyzer->addLock(FSet,
1786 llvm::make_unique<LockableFactEntry>(
1788 A->isShared() ? LK_Shared : LK_Exclusive, Loc,
1790 ClassifyDiagnostic(A));
1794 // When we encounter an unlock function, we need to remove unlocked
1795 // mutexes from the lockset, and flag a warning if they are not there.
1796 case attr::ReleaseCapability: {
1797 const auto *A = cast<ReleaseCapabilityAttr>(At);
1799 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD);
1800 else if (A->isShared())
1801 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD);
1803 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD);
1805 CapDiagKind = ClassifyDiagnostic(A);
1809 case attr::RequiresCapability: {
1810 const auto *A = cast<RequiresCapabilityAttr>(At);
1811 for (auto *Arg : A->args()) {
1812 warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
1813 POK_FunctionCall, ClassifyDiagnostic(A),
1815 // use for adopting a lock
1817 Analyzer->getMutexIDs(A->isShared() ? ScopedSharedReqs
1818 : ScopedExclusiveReqs,
1825 case attr::LocksExcluded: {
1826 const auto *A = cast<LocksExcludedAttr>(At);
1827 for (auto *Arg : A->args())
1828 warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
1832 // Ignore attributes unrelated to thread-safety
1838 // Remove locks first to allow lock upgrading/downgrading.
1839 // FIXME -- should only fully remove if the attribute refers to 'this'.
1840 bool Dtor = isa<CXXDestructorDecl>(D);
1841 for (const auto &M : ExclusiveLocksToRemove)
1842 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
1843 for (const auto &M : SharedLocksToRemove)
1844 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
1845 for (const auto &M : GenericLocksToRemove)
1846 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);
1849 for (const auto &M : ExclusiveLocksToAdd)
1850 Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
1851 M, LK_Exclusive, Loc, isScopedVar),
1853 for (const auto &M : SharedLocksToAdd)
1854 Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
1855 M, LK_Shared, Loc, isScopedVar),
1859 // Add the managing object as a dummy mutex, mapped to the underlying mutex.
1860 SourceLocation MLoc = VD->getLocation();
1861 DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
1862 // FIXME: does this store a pointer to DRE?
1863 CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);
1865 std::copy(ScopedExclusiveReqs.begin(), ScopedExclusiveReqs.end(),
1866 std::back_inserter(ExclusiveLocksToAdd));
1867 std::copy(ScopedSharedReqs.begin(), ScopedSharedReqs.end(),
1868 std::back_inserter(SharedLocksToAdd));
1869 Analyzer->addLock(FSet,
1870 llvm::make_unique<ScopedLockableFactEntry>(
1871 Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
1876 /// For unary operations which read and write a variable, we need to
1877 /// check whether we hold any required mutexes. Reads are checked in VisitCastExpr.
1879 void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
1880 switch (UO->getOpcode()) {
1885 checkAccess(UO->getSubExpr(), AK_Written);
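// Example (an illustrative sketch, assuming the GUARDED_BY macro): an
// increment both reads and writes its operand, so the guarding mutex must be
// held exclusively at this point.
//
//   Mutex mu;
//   int counter GUARDED_BY(mu);
//
//   void bump() {
//     ++counter;   // warning: writing variable 'counter' requires holding
//                  // mutex 'mu' exclusively
//   }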
1892 /// For binary operations which assign to a variable (writes), we need to check
1893 /// whether we hold any required mutexes.
1894 /// FIXME: Deal with non-primitive types.
1895 void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
1896 if (!BO->isAssignmentOp())
1899 // adjust the context
1900 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
1902 checkAccess(BO->getLHS(), AK_Written);
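// Example (an illustrative sketch; 'Mutex' is assumed to have annotated
// Lock/Unlock methods): assigning to a guarded variable is treated as a write
// and checked against the lockset at that point.
//
//   Mutex mu;
//   int data GUARDED_BY(mu);
//
//   void set(int v) {
//     mu.Lock();
//     data = v;    // OK: 'mu' is held exclusively
//     mu.Unlock();
//   }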
1905 /// Whenever we do an LValue to Rvalue cast, we are reading a variable and
1906 /// need to ensure we hold any required mutexes.
1907 /// FIXME: Deal with non-primitive types.
1908 void BuildLockset::VisitCastExpr(CastExpr *CE) {
1909 if (CE->getCastKind() != CK_LValueToRValue)
1911 checkAccess(CE->getSubExpr(), AK_Read);
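// Example (an illustrative sketch, assuming the GUARDED_BY macro): reading a
// guarded variable, i.e. an lvalue-to-rvalue conversion, is checked as a read,
// for which a shared lock is sufficient.
//
//   Mutex mu;
//   int data GUARDED_BY(mu);
//
//   int get() {
//     return data;   // warning: reading variable 'data' requires holding
//                    // mutex 'mu'
//   }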
1914 void BuildLockset::VisitCallExpr(CallExpr *Exp) {
1915 bool ExamineArgs = true;
1916 bool OperatorFun = false;
1918 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
1919 const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
1920 // ME can be null when calling through a method pointer
1921 const CXXMethodDecl *MD = CE->getMethodDecl();
1924 if (ME->isArrow()) {
1926 checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
1927 else // FIXME -- should be AK_Written
1928 checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
1931 checkAccess(CE->getImplicitObjectArgument(), AK_Read);
1932 else // FIXME -- should be AK_Written
1933 checkAccess(CE->getImplicitObjectArgument(), AK_Read);
1936 } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
1939 auto OEop = OE->getOperator();
1942 ExamineArgs = false;
1943 const Expr *Target = OE->getArg(0);
1944 const Expr *Source = OE->getArg(1);
1945 checkAccess(Target, AK_Written);
1946 checkAccess(Source, AK_Read);
1951 case OO_Subscript: {
1952 const Expr *Obj = OE->getArg(0);
1953 checkAccess(Obj, AK_Read);
1954 if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
1955 // Grrr. operator* can be multiplication...
1956 checkPtAccess(Obj, AK_Read);
1961 // TODO: get rid of this, and rely on pass-by-ref instead.
1962 const Expr *Obj = OE->getArg(0);
1963 checkAccess(Obj, AK_Read);
1970 if (FunctionDecl *FD = Exp->getDirectCallee()) {
1971 // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
1972 // only turns off checking within the body of a function, but we also
1973 // use it to turn off checking in arguments to the function. This
1974 // could result in some false negatives, but the alternative is to
1975 // create yet another attribute.
1976 if (!FD->hasAttr<NoThreadSafetyAnalysisAttr>()) {
1977 unsigned Fn = FD->getNumParams();
1978 unsigned Cn = Exp->getNumArgs();
1983 if (isa<CXXMethodDecl>(FD)) {
1984 // First arg in operator call is implicit self argument,
1985 // and doesn't appear in the FunctionDecl.
1989 // Ignore the first argument of operators; it's been checked above.
1993 // Ignore default arguments
1994 unsigned n = (Fn < Cn) ? Fn : Cn;
1996 for (; i < n; ++i) {
1997 ParmVarDecl *Pvd = FD->getParamDecl(i);
1998 Expr *Arg = Exp->getArg(i + Skip);
1999 QualType Qt = Pvd->getType();
2000 if (Qt->isReferenceType())
2001 checkAccess(Arg, AK_Read, POK_PassByRef);
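// Example (an illustrative sketch, assuming the GUARDED_BY and
// NO_THREAD_SAFETY_ANALYSIS macros): passing a guarded variable by reference
// is checked as a read at the call site, unless the callee is marked
// NO_THREAD_SAFETY_ANALYSIS, which also suppresses these argument checks.
//
//   Mutex mu;
//   int data GUARDED_BY(mu);
//   void observe(const int &x);
//
//   void f() {
//     observe(data);   // warning: passing variable 'data' by reference
//                      // requires holding mutex 'mu'
//   }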
2007 auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
2008 if (!D || !D->hasAttrs())
2013 void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
2014 const CXXConstructorDecl *D = Exp->getConstructor();
2015 if (D && D->isCopyConstructor()) {
2016 const Expr* Source = Exp->getArg(0);
2017 checkAccess(Source, AK_Read);
2019 // FIXME -- only handles constructors in DeclStmt below.
2022 static CXXConstructorDecl *
2023 findConstructorForByValueReturn(const CXXRecordDecl *RD) {
2024 // Prefer a move constructor over a copy constructor. If there's more than
2025 // one copy constructor or more than one move constructor, we arbitrarily
2026 // pick the first declared such constructor rather than trying to guess which
2027 // one is more appropriate.
2028 CXXConstructorDecl *CopyCtor = nullptr;
2029 for (auto *Ctor : RD->ctors()) {
2030 if (Ctor->isDeleted())
2032 if (Ctor->isMoveConstructor())
2034 if (!CopyCtor && Ctor->isCopyConstructor())
2040 static Expr *buildFakeCtorCall(CXXConstructorDecl *CD, ArrayRef<Expr *> Args,
2041 SourceLocation Loc) {
2042 ASTContext &Ctx = CD->getASTContext();
2043 return CXXConstructExpr::Create(Ctx, Ctx.getRecordType(CD->getParent()), Loc,
2044 CD, true, Args, false, false, false, false,
2045 CXXConstructExpr::CK_Complete,
2046 SourceRange(Loc, Loc));
2049 void BuildLockset::VisitDeclStmt(DeclStmt *S) {
2050 // adjust the context
2051 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
2053 for (auto *D : S->getDeclGroup()) {
2054 if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
2055 Expr *E = VD->getInit();
2058 E = E->IgnoreParens();
2060 // handle constructors that involve temporaries
2061 if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
2062 E = EWC->getSubExpr();
2063 if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
2064 E = BTE->getSubExpr();
2066 if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) {
2067 const auto *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
2068 if (!CtorD || !CtorD->hasAttrs())
2070 handleCall(E, CtorD, VD);
2071 } else if (isa<CallExpr>(E) && E->isRValue()) {
2072 // If the object is initialized by a function call that returns a
2073 // scoped lockable by value, use the attributes on the copy or move
2074 // constructor to figure out what effect that should have on the lockset.
2076 // FIXME: Is this really the best way to handle this situation?
2077 auto *RD = E->getType()->getAsCXXRecordDecl();
2078 if (!RD || !RD->hasAttr<ScopedLockableAttr>())
2080 CXXConstructorDecl *CtorD = findConstructorForByValueReturn(RD);
2081 if (!CtorD || !CtorD->hasAttrs())
2083 handleCall(buildFakeCtorCall(CtorD, {E}, E->getLocStart()), CtorD, VD);
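// Example (an illustrative sketch; 'MutexLock' and 'makeLock' are hypothetical
// and the guard class is assumed to be marked SCOPED_CAPABILITY): when a guard
// is returned by value, a synthesized copy/move construction lets the analysis
// track the new variable as managing the underlying mutex.
//
//   Mutex mu;
//   MutexLock makeLock();   // returns a guard that has acquired 'mu'
//
//   void f() {
//     MutexLock lock = makeLock();   // 'lock' now manages 'mu'
//   }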
2089 /// Compute the intersection of two locksets and issue warnings for any
2090 /// locks in the symmetric difference.
2092 /// This function is used at a merge point in the CFG when comparing the lockset
2093 /// of each branch being merged. For example, given the following sequence:
2094 /// A; if () then B; else C; D; we need to check that the locksets after B and C
2095 /// are the same. In the event of a difference, we use the intersection of these
2096 /// two locksets at the start of D.
2098 /// \param FSet1 The first lockset.
2099 /// \param FSet2 The second lockset.
2100 /// \param JoinLoc The location of the join point for error reporting.
2101 /// \param LEK1 The error message to report if a mutex is missing from FSet1.
2102 /// \param LEK2 The error message to report if a mutex is missing from FSet2.
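///
/// For example (an illustrative sketch, assuming a Mutex class with annotated
/// Lock/Unlock methods):
/// \code
///   mu.Lock();
///   if (cond)
///     mu.Unlock();   // lockset on this branch: {}
///   // lockset on the other branch: {mu}
///   // The locksets differ at the join, so a warning is issued and the
///   // intersection ({}) is used from the join onwards.
/// \endcode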
2103 void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
2104 const FactSet &FSet2,
2105 SourceLocation JoinLoc,
2109 FactSet FSet1Orig = FSet1;
2111 // Find locks in FSet2 that conflict or are not in FSet1, and warn.
2112 for (const auto &Fact : FSet2) {
2113 const FactEntry *LDat1 = nullptr;
2114 const FactEntry *LDat2 = &FactMan[Fact];
2115 FactSet::iterator Iter1 = FSet1.findLockIter(FactMan, *LDat2);
2116 if (Iter1 != FSet1.end()) LDat1 = &FactMan[*Iter1];
2119 if (LDat1->kind() != LDat2->kind()) {
2120 Handler.handleExclusiveAndShared("mutex", LDat2->toString(),
2121 LDat2->loc(), LDat1->loc());
2122 if (Modify && LDat1->kind() != LK_Exclusive) {
2123 // Take the exclusive lock, which is the one in FSet2.
2127 else if (Modify && LDat1->asserted() && !LDat2->asserted()) {
2128 // The non-asserted lock in FSet2 is the one we want to track.
2132 LDat2->handleRemovalFromIntersection(FSet2, FactMan, JoinLoc, LEK1,
2137 // Find locks in FSet1 that are not in FSet2, and remove them.
2138 for (const auto &Fact : FSet1Orig) {
2139 const FactEntry *LDat1 = &FactMan[Fact];
2140 const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1);
2143 LDat1->handleRemovalFromIntersection(FSet1Orig, FactMan, JoinLoc, LEK2,
2146 FSet1.removeLock(FactMan, *LDat1);
2151 // Return true if block B never continues to its successors.
2152 static bool neverReturns(const CFGBlock *B) {
2153 if (B->hasNoReturnElement())
2158 CFGElement Last = B->back();
2159 if (Optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
2160 if (isa<CXXThrowExpr>(S->getStmt()))
2166 /// Check a function's CFG for thread-safety violations.
2168 /// We traverse the blocks in the CFG, compute the set of mutexes that are held
2169 /// at the end of each block, and issue warnings for thread safety violations.
2170 /// Each block in the CFG is traversed exactly once.
2171 void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
2172 // TODO: this whole function needs be rewritten as a visitor for CFGWalker.
2173 // For now, we just use the walker to set things up.
2174 threadSafety::CFGWalker walker;
2175 if (!walker.init(AC))
2178 // AC.dumpCFG(true);
2179 // threadSafety::printSCFG(walker);
2181 CFG *CFGraph = walker.getGraph();
2182 const NamedDecl *D = walker.getDecl();
2183 const auto *CurrentFunction = dyn_cast<FunctionDecl>(D);
2184 CurrentMethod = dyn_cast<CXXMethodDecl>(D);
2186 if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
2189 // FIXME: Do something a bit more intelligent inside constructor and
2190 // destructor code. Constructors and destructors must assume unique access
2191 // to 'this', so checks on member variable access are disabled, but we should
2192 // still enable checks on other objects.
2193 if (isa<CXXConstructorDecl>(D))
2194 return; // Don't check inside constructors.
2195 if (isa<CXXDestructorDecl>(D))
2196 return; // Don't check inside destructors.
2198 Handler.enterFunction(CurrentFunction);
2200 BlockInfo.resize(CFGraph->getNumBlockIDs(),
2201 CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));
2203 // We need to explore the CFG via a "topological" ordering.
2204 // That way, we will be guaranteed to have information about required
2205 // predecessor locksets when exploring a new block.
2206 const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
2207 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
2209 // Mark entry block as reachable
2210 BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true;
2212 // Compute SSA names for local variables
2213 LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
2215 // Fill in source locations for all CFGBlocks.
2216 findBlockLocations(CFGraph, SortedGraph, BlockInfo);
2218 CapExprSet ExclusiveLocksAcquired;
2219 CapExprSet SharedLocksAcquired;
2220 CapExprSet LocksReleased;
2222 // Add locks from exclusive_locks_required and shared_locks_required
2223 // to initial lockset. Also turn off checking for lock and unlock functions.
2224 // FIXME: is there a more intelligent way to check lock/unlock functions?
2225 if (!SortedGraph->empty() && D->hasAttrs()) {
2226 const CFGBlock *FirstBlock = *SortedGraph->begin();
2227 FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
2229 CapExprSet ExclusiveLocksToAdd;
2230 CapExprSet SharedLocksToAdd;
2231 StringRef CapDiagKind = "mutex";
2233 SourceLocation Loc = D->getLocation();
2234 for (const auto *Attr : D->attrs()) {
2235 Loc = Attr->getLocation();
2236 if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
2237 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2239 CapDiagKind = ClassifyDiagnostic(A);
2240 } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
2241 // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
2242 // We must ignore such methods.
2243 if (A->args_size() == 0)
2245 // FIXME -- deal with exclusive vs. shared unlock functions?
2246 getMutexIDs(ExclusiveLocksToAdd, A, nullptr, D);
2247 getMutexIDs(LocksReleased, A, nullptr, D);
2248 CapDiagKind = ClassifyDiagnostic(A);
2249 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
2250 if (A->args_size() == 0)
2252 getMutexIDs(A->isShared() ? SharedLocksAcquired
2253 : ExclusiveLocksAcquired,
2255 CapDiagKind = ClassifyDiagnostic(A);
2256 } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
2257 // Don't try to check trylock functions for now.
2259 } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
2260 // Don't try to check trylock functions for now.
2262 } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
2263 // Don't try to check trylock functions for now.
2268 // FIXME -- Loc can be wrong here.
2269 for (const auto &Mu : ExclusiveLocksToAdd) {
2270 auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc);
2271 Entry->setDeclared(true);
2272 addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
2274 for (const auto &Mu : SharedLocksToAdd) {
2275 auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc);
2276 Entry->setDeclared(true);
2277 addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
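// Example (an illustrative sketch, assuming the REQUIRES and RELEASE macros):
// attributes on the function being analyzed seed the entry lockset, and a lock
// named in a release attribute is assumed held on entry and expected to be
// released before the function returns.
//
//   Mutex mu;
//   int data GUARDED_BY(mu);
//
//   void update() REQUIRES(mu) {
//     data++;        // OK: 'mu' is in the initial lockset
//   }
//
//   void unlockAndClear() RELEASE(mu) {
//     data = 0;      // OK: 'mu' is held on entry
//     mu.Unlock();   // 'mu' must be gone from the lockset at the end
//   }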
2281 for (const auto *CurrBlock : *SortedGraph) {
2282 int CurrBlockID = CurrBlock->getBlockID();
2283 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
2285 // Use the default initial lockset in case there are no predecessors.
2286 VisitedBlocks.insert(CurrBlock);
2288 // Iterate through the predecessor blocks and warn if the lockset for all
2289 // predecessors is not the same. We take the entry lockset of the current
2290 // block to be the intersection of all previous locksets.
2291 // FIXME: By keeping the intersection, we may output more errors in future
2292 // for a lock which is not in the intersection, but was in the union. We
2293 // may want to also keep the union in future. As an example, let's say
2294 // the intersection contains Mutex L, and the union contains L and M.
2295 // Later we unlock M. At this point, we would output an error because we
2296 // never locked M; although the real error is probably that we forgot to
2297 // lock M on all code paths. Conversely, let's say that later we lock M.
2298 // In this case, we should compare against the intersection instead of the
2299 // union because the real error is probably that we forgot to unlock M on all code paths.
2301 bool LocksetInitialized = false;
2302 SmallVector<CFGBlock *, 8> SpecialBlocks;
2303 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
2304 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
2305 // if *PI -> CurrBlock is a back edge
2306 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
2309 int PrevBlockID = (*PI)->getBlockID();
2310 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
2312 // Ignore edges from blocks that can't return.
2313 if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
2316 // Okay, we can reach this block from the entry.
2317 CurrBlockInfo->Reachable = true;
2319 // If the previous block ended in a 'continue' or 'break' statement, then
2320 // a difference in locksets is probably due to a bug in that block, rather
2321 // than in some other predecessor. In that case, keep the other
2322 // predecessor's lockset.
2323 if (const Stmt *Terminator = (*PI)->getTerminator()) {
2324 if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
2325 SpecialBlocks.push_back(*PI);
2330 FactSet PrevLockset;
2331 getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);
2333 if (!LocksetInitialized) {
2334 CurrBlockInfo->EntrySet = PrevLockset;
2335 LocksetInitialized = true;
2337 intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
2338 CurrBlockInfo->EntryLoc,
2339 LEK_LockedSomePredecessors);
2343 // Skip rest of block if it's not reachable.
2344 if (!CurrBlockInfo->Reachable)
2347 // Process continue and break blocks. Assume that the lockset for the
2348 // resulting block is unaffected by any discrepancies in them.
2349 for (const auto *PrevBlock : SpecialBlocks) {
2350 int PrevBlockID = PrevBlock->getBlockID();
2351 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
2353 if (!LocksetInitialized) {
2354 CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
2355 LocksetInitialized = true;
2357 // Determine whether this edge is a loop terminator for diagnostic
2358 // purposes. FIXME: A 'break' statement might be a loop terminator, but
2359 // it might also be part of a switch. Also, a subsequent destructor
2360 // might add to the lockset, in which case the real issue might be a
2361 // double lock on the other path.
2362 const Stmt *Terminator = PrevBlock->getTerminator();
2363 bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);
2365 FactSet PrevLockset;
2366 getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet,
2367 PrevBlock, CurrBlock);
2369 // Do not update EntrySet.
2370 intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
2371 PrevBlockInfo->ExitLoc,
2372 IsLoop ? LEK_LockedSomeLoopIterations
2373 : LEK_LockedSomePredecessors,
2378 BuildLockset LocksetBuilder(this, *CurrBlockInfo);
2380 // Visit all the statements in the basic block.
2381 for (const auto &BI : *CurrBlock) {
2382 switch (BI.getKind()) {
2383 case CFGElement::Statement: {
2384 CFGStmt CS = BI.castAs<CFGStmt>();
2385 LocksetBuilder.Visit(const_cast<Stmt *>(CS.getStmt()));
2388 // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
2389 case CFGElement::AutomaticObjectDtor: {
2390 CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
2391 auto *DD = const_cast<CXXDestructorDecl *>(
2392 AD.getDestructorDecl(AC.getASTContext()));
2393 if (!DD->hasAttrs())
2396 // Create a dummy expression for the variable being destroyed.
2397 auto *VD = const_cast<VarDecl *>(AD.getVarDecl());
2398 DeclRefExpr DRE(VD, false, VD->getType().getNonReferenceType(),
2399 VK_LValue, AD.getTriggerStmt()->getLocEnd());
2400 LocksetBuilder.handleCall(&DRE, DD);
2407 CurrBlockInfo->ExitSet = LocksetBuilder.FSet;
2409 // For every back edge from CurrBlock (the end of the loop) to another block
2410 // (FirstLoopBlock) we need to check that the lockset of CurrBlock is equal to
2411 // the one held at the beginning of FirstLoopBlock. We can look up the
2412 // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
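// Example (an illustrative sketch, assuming an annotated Mutex 'mu'): a lock
// acquired inside the loop body but not released before the back edge changes
// the lockset between iterations, so the mismatch between the loop-end exit
// set and the loop-entry set is diagnosed.
//
//   void f() {
//     while (cond()) {
//       mu.Lock();   // lockset at the back edge: {mu}
//     }              // lockset at loop entry:    {}  -> warning
//   }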
2413 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
2414 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
2415 // if CurrBlock -> *SI is *not* a back edge
2416 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
2419 CFGBlock *FirstLoopBlock = *SI;
2420 CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
2421 CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
2422 intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet,
2424 LEK_LockedSomeLoopIterations,
2429 CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
2430 CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()];
2432 // Skip the final check if the exit block is unreachable.
2433 if (!Final->Reachable)
2436 // By default, we expect all locks held on entry to be held on exit.
2437 FactSet ExpectedExitSet = Initial->EntrySet;
2439 // Adjust the expected exit set by adding or removing locks, as declared
2440 // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
2441 // issue the appropriate warning.
2442 // FIXME: the location here is not quite right.
2443 for (const auto &Lock : ExclusiveLocksAcquired)
2444 ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
2445 Lock, LK_Exclusive, D->getLocation()));
2446 for (const auto &Lock : SharedLocksAcquired)
2447 ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
2448 Lock, LK_Shared, D->getLocation()));
2449 for (const auto &Lock : LocksReleased)
2450 ExpectedExitSet.removeLock(FactMan, Lock);
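// Example (an illustrative sketch, assuming the ACQUIRE macro): a lock
// function is expected to hold the named mutex when it returns, so the
// adjusted exit set contains 'mu' and forgetting to acquire it is reported.
//
//   Mutex mu;
//   void lockIt() ACQUIRE(mu) {
//     // forgot mu.Lock();
//   }   // warning: expected 'mu' to be held at the end of the function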
2452 // FIXME: Should we call this function for all blocks which exit the function?
2453 intersectAndWarn(ExpectedExitSet, Final->ExitSet,
2455 LEK_LockedAtEndOfFunction,
2456 LEK_NotLockedAtEndOfFunction,
2459 Handler.leaveFunction(CurrentFunction);
2462 /// Check a function's CFG for thread-safety violations.
2464 /// We traverse the blocks in the CFG, compute the set of mutexes that are held
2465 /// at the end of each block, and issue warnings for thread safety violations.
2466 /// Each block in the CFG is traversed exactly once.
2467 void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
2468 ThreadSafetyHandler &Handler,
2471 *BSet = new BeforeSet;
2472 ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
2473 Analyzer.runAnalysis(AC);
2476 void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }
2478 /// Helper function that returns a LockKind required for the given level of access.
2480 LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
2485 return LK_Exclusive;
2487 llvm_unreachable("Unknown AccessKind");