//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*--//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "ExprEngine"

#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ParentMap.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  const CFG *CalleeCFG = calleeCtx->getCFG();
  const CFGBlock *Entry = &(CalleeCFG->getEntry());

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node, and if it is genuinely new, add it to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew)
    Engine.getWorkList()->enqueue(Node);
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = 0;
  const CFGBlock *Blk = 0;
  const StackFrameContext *SF =
          Node->getLocation().getLocationContext()->getCurrentStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getLocationContext()->getCurrentStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::pair<const Stmt*, const CFGBlock*>((Stmt*)0, (CFGBlock*)0);

    Node = *Node->pred_begin();
  }

  return std::pair<const Stmt*, const CFGBlock*>(S, Blk);
}

/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!V.getAs<Loc>())
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that that never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}

void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
                                           ExplodedNode *Pred,
                                           ExplodedNodeSet &Dst) {
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = 0;
  const CFGBlock *Blk = 0;
  llvm::tie(LastSt, Blk) = getLastStmt(Pred);
  if (!Blk || !LastSt) {
    Dst.Add(Pred);
    return;
  }

  // Here, we destroy the current location context. We use the current
  // function's entire body as a diagnostic statement, with which the program
  // point will be associated. However, we only want to use LastStmt as a
  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
  // is dead.
  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
  const LocationContext *LCtx = Pred->getLocationContext();
  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
             LCtx->getAnalysisDeclContext()->getBody(),
             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
}

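/// Returns true if the callee decl that was actually inlined (the runtime
/// definition) differs from the decl statically associated with the call
/// site, e.g. after dynamic dispatch has been resolved to an override.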
static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
    const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
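  // A call with no static decl (e.g. through an unknown function pointer)
  // always counts as "different"; otherwise compare canonical decls so that
  // redeclarations of the same function are not treated as different callees.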
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

/// Returns true if the CXXConstructExpr \p E was intended to construct a
/// prvalue for the region in \p V.
///
/// Note that we can't just test for rvalue vs. glvalue because
/// CXXConstructExprs embedded in DeclStmts and initializers are considered
/// rvalues by the AST, and the analyzer would like to treat them as lvalues.
static bool isTemporaryPRValue(const CXXConstructExpr *E, SVal V) {
  if (E->isGLValue())
    return false;

  const MemRegion *MR = V.getAsRegion();
  if (!MR)
    return false;

  return isa<CXXTempObjectRegion>(MR);
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of call exit sequence)
/// 2. Bind the return value
/// 3. Run RemoveDeadBindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.
  const StackFrameContext *calleeCtx =
      CEBNode->getLocationContext()->getCurrentStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getCurrentStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = 0;
  const CFGBlock *Blk = 0;
  llvm::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);

      // If the constructed object is a temporary prvalue, get its bindings.
      if (isTemporaryPRValue(CCE, ThisV))
        ThisV = state->getSVal(ThisV.castAs<Loc>());

      state = state->BindExpr(CCE, callerCtx, ThisV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report the issues such as leaks in the stack contexts in which
  // they occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine : Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with 0 statement and callee location
    // context, telling it to clean up everything in the callee's context
    // (and its children). We use the callee's function body as a diagnostic
    // statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, 0, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = 0;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx,
                                                      &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                               *UpdatedCall, *this,
                                               /*WasInlined=*/true);

    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*WasInlined=*/true);
    } else if (CE) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*WasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
                                   PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

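// Count the number of frames for D on the current path (to detect direct and
// mutual recursion) and measure the inlined stack depth, ignoring callees
// small enough to fall under the always-inline size threshold.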
void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                                    bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;
  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();
      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }
      // Do not count the small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      const CFG *CalleeCFG = CalleeADC->getCFG();
      if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

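/// Returns true if the given function's outermost enclosing namespace is
/// 'std'.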
static bool IsInStdNamespace(const FunctionDecl *FD) {
  const DeclContext *DC = FD->getEnclosingNamespaceContext();
  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
  if (!ND)
    return false;

  while (const DeclContext *Parent = ND->getParent()) {
    if (!isa<NamespaceDecl>(Parent))
      break;
    ND = cast<NamespaceDecl>(Parent);
  }

  return ND->getName() == "std";
}

// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both paths -
// one on which we do inline it and the other one on which we don't. This is
// done to ensure we do not drop coverage.
// This is the map from the receiver region to a bool, specifying whether we
// consider this region's information precise or not along the given path.
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
}
REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                                 CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
                                                             unsigned))

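// Push a new stack frame for the callee, bind the actual arguments to the
// formal parameters, and enqueue the resulting CallEnter node; the caller's
// node is reclaimed from the builder since exploration continues in the
// callee.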
bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currBldrCtx->getBlock(),
                             currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}

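// Returns the state with the ReplayWithoutInlining marker stripped if this
// call previously failed to inline and is now being replayed, or null if
// there is nothing to replay.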
static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return 0;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, Call, *this);

  // Actually evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this);

  // Finally, run any post-call checks.
  getCheckerManager().runCheckersForPostCall(Dst, dstCallEvaluated,
                                             Call, *this);
}

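// Bind a value to the call's originating expression: the receiver for ObjC
// method families known to return 'self', the constructed object for
// constructors, or a freshly conjured symbol otherwise.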
ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    SVal ThisV = C->getCXXThisVal();

    // If the constructed object is a temporary prvalue, get its bindings.
    if (isTemporaryPRValue(cast<CXXConstructExpr>(E), ThisV))
      ThisV = State->getSVal(ThisV.castAs<Loc>());

    return State->BindExpr(E, LCtx, ThisV);
  }

  // Conjure a symbol if the return value is unknown.
  QualType ResultTy = Call.getResultType();
  SValBuilder &SVB = getSValBuilder();
  unsigned Count = currBldrCtx->blockCount();
  SVal R = SVB.conjureSymbolVal(0, E, LCtx, ResultTy, Count);
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred,
                                      ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

enum CallInlinePolicy {
  CIP_Allowed,
  CIP_DisallowedOnce,
  CIP_DisallowedAlways
};

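// Applies the kind-specific inlining policies (member functions, constructors,
// destructors, allocators, and Objective-C messages) from AnalyzerOptions.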
static CallInlinePolicy mayInlineCallKind(const CallEvent &Call,
                                          const ExplodedNode *Pred,
                                          AnalyzerOptions &Opts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return CIP_DisallowedAlways;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return CIP_DisallowedAlways;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    // Even once we do, we still need to be careful about implicitly-generated
    // initializers for array fields in default move/copy constructors.
    const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return CIP_DisallowedOnce;

    // FIXME: This is a hack. We don't use the correct region for a new
    // expression, so if we inline the constructor its result will just be
    // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
    // and the longer-term possible fix is discussed in PR12014.
    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
    if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
      if (isa<CXXNewExpr>(Parent))
        return CIP_DisallowedOnce;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the
    // constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // FIXME: This is a hack. We don't handle temporary destructors
    // right now, so we shouldn't inline their constructors.
    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
      if (!Target || !isa<DeclRegion>(Target))
        return CIP_DisallowedOnce;

    break;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return CIP_DisallowedOnce;

    break;
  }
  case CE_CXXAllocator:
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return CIP_DisallowedAlways;
  case CE_ObjCMessage:
    if (!Opts.mayInlineObjCMethod())
      return CIP_DisallowedAlways;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return CIP_DisallowedAlways;
    break;
  }

  return CIP_Allowed;
}

/// Returns true if the given C++ class contains a member with the given name.
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
                      StringRef Name) {
  const IdentifierInfo &II = Ctx.Idents.get(Name);
  DeclarationName DeclName = Ctx.DeclarationNames.getIdentifier(&II);
  if (!RD->lookup(DeclName).empty())
    return true;

  CXXBasePaths Paths(false, false, false);
  if (RD->lookupInBases(&CXXRecordDecl::FindOrdinaryMember,
                        DeclName.getAsOpaquePtr(),
                        Paths))
    return true;

  return false;
}

/// Returns true if the given C++ class is a container or iterator.
///
/// Our heuristic for this is whether it contains a method named 'begin()' or a
/// nested type named 'iterator' or 'iterator_category'.
static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
  return hasMember(Ctx, RD, "begin") ||
         hasMember(Ctx, RD, "iterator") ||
         hasMember(Ctx, RD, "iterator_category");
}

/// Returns true if the given function refers to a constructor or destructor of
/// a C++ container or iterator.
///
/// We generally do a poor job modeling most containers right now, and would
/// prefer not to inline their setup and teardown.
static bool isContainerCtorOrDtor(const ASTContext &Ctx,
                                  const FunctionDecl *FD) {
  if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD)))
    return false;

  const CXXRecordDecl *RD = cast<CXXMethodDecl>(FD)->getParent();
  return isContainerClass(Ctx, RD);
}

/// Returns true if the function in \p CalleeADC may be inlined in general.
///
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
static bool mayInlineDecl(const CallEvent &Call, AnalysisDeclContext *CalleeADC,
                          AnalyzerOptions &Opts) {
  // FIXME: Do not inline variadic calls.
  if (Call.isVariadic())
    return false;

  // Check certain C++-related inlining policies.
  ASTContext &Ctx = CalleeADC->getASTContext();
  if (Ctx.getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
      // Conditionally control the inlining of template functions.
      if (!Opts.mayInlineTemplateFunctions())
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally control the inlining of C++ standard library functions.
      if (!Opts.mayInlineCXXStandardLibrary())
        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
          if (IsInStdNamespace(FD))
            return false;

      // Conditionally control the inlining of methods on objects that look
      // like C++ containers.
      if (!Opts.mayInlineCXXContainerCtorsAndDtors())
        if (!Ctx.getSourceManager().isFromMainFile(FD->getLocation()))
          if (isContainerCtorOrDtor(Ctx, FD))
            return false;
    }
  }

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline large functions.
  if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
    return false;

  // It is possible that the live variables analysis cannot be
  // run. If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

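// Combines the cached static verdict from mayInlineDecl with per-call dynamic
// checks: call-kind policy, stack depth and recursion limits, and how many
// times a large callee has already been inlined.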
bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline.hasValue()) {
    if (!MayInline.getValue())
      return false;
  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(Call, CalleeADC, Opts)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline.hasValue() || MayInline.getValue());
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  const CFG *CalleeCFG = CalleeADC->getCFG();

  // Do not inline if recursive or we've reached max stack frame count.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      ((CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize())
       || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.getMaxTimesInlineLarge()) &&
      CalleeCFG->getNumBlockIDs() > 13) {
    NumReachedInlineCountMax++;
    return false;
  }

  if (HowToInline == Inline_Minimal &&
      (CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize()
       || IsRecursive))
    return false;

  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  return true;
}

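// Returns true for trivial copy/move assignment operators, which can be
// modeled as a simple copy instead of being inlined.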
static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;

  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;

  return MD->isTrivial();
}

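// The default evaluation strategy when no checker evaluates the call: try to
// inline the callee's definition, and fall back to conservative evaluation.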
void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }

      // We are not bifurcating and we do have a Decl, so just inline.
      if (inlineCall(*Call, D, Bldr, Pred, State))
        return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

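// Split the path at a dynamically dispatched call: analyze one branch with the
// callee inlined and one treated conservatively, recording the choice per
// receiver region so each region is only bifurcated once.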
void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
                        State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline path", keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      if (inlineCall(Call, D, Bldr, Pred, State))
        return;
    // If inlining failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeInlined);
  inlineCall(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
}

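// Run the pre-statement checks for the return statement and, when a value is
// returned, generate a PostStmt node for it; processCallExit later reads the
// returned value off the ReturnStmt when binding the call's result.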
void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                   ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}