//===- ExprEngineCallAndReturn.cpp - Support for call/return ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

14 #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
15 #include "PrettyStackTraceLocationContext.h"
16 #include "clang/AST/CXXInheritance.h"
17 #include "clang/AST/DeclCXX.h"
18 #include "clang/Analysis/Analyses/LiveVariables.h"
19 #include "clang/Analysis/ConstructionContext.h"
20 #include "clang/StaticAnalyzer/Core/CheckerManager.h"
21 #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace ento;

#define DEBUG_TYPE "ExprEngine"

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE,
                                  ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  PrettyStackTraceLocationContext CrashInfo(calleeCtx);
  const CFGBlock *Entry = CE.getEntry();

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node, notify checkers that analysis of the function has
  // begun, and add the resultant nodes to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew) {
    ExplodedNodeSet DstBegin;
    processBeginOfFunction(BC, Node, DstBegin, Loc);
    Engine.enqueue(DstBegin);
  }
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = nullptr;
  const CFGBlock *Blk = nullptr;
  const StackFrameContext *SF = Node->getStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // function's stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::make_pair(nullptr, nullptr);

    Node = *Node->pred_begin();
  }

  return std::make_pair(S, Blk);
}

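// Note on getLastStmt() above: a CallExitEnd whose call site is null comes
// from an implicitly-generated call, e.g. an automatic destructor call
// synthesized into the CFG, which has no corresponding statement; that is
// why the search walks back over the entire inlined frame before resuming.
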
/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!V.getAs<Loc>())
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that this never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}

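// An illustrative example of the covariant-return case handled above:
//
//   struct Base { virtual Base *clone(); };
//   struct Derived : Base { Derived *clone() override; };
//
// When 'Base *b = p->clone();' is devirtualized to Derived::clone(), the
// callee returns a Derived*, and adjustReturnValue() applies the
// derived-to-base cast so the value matches the caller's expected Base*.
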
void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
                                           ExplodedNode *Pred,
                                           ExplodedNodeSet &Dst) {
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(Pred);
  if (!Blk || !LastSt) {
    Dst.Add(Pred);
    return;
  }

  // Here, we destroy the current location context. We use the current
  // function's entire body as a diagnostic statement, with which the program
  // point will be associated. However, we only want to use LastStmt as a
  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
  // is dead.
  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
  const LocationContext *LCtx = Pred->getLocationContext();
  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
             LCtx->getAnalysisDeclContext()->getBody(),
             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
}

static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
    const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

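// For instance (illustrative), if a call through a Base pointer is inlined
// after being devirtualized to an override Derived::f(), the runtime callee
// decl (Derived::f) differs from the decl named at the call site (Base::f),
// and wasDifferentDeclUsedForInlining() returns true.
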
/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of the call exit sequence)
/// 2. Bind the return value
/// 3. Run remove dead bindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.
  PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
  const StackFrameContext *calleeCtx = CEBNode->getStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);
      ThisV = state->getSVal(ThisV.castAs<Loc>());
      state = state->BindExpr(CCE, callerCtx, ThisV);
    }

    if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
      // We are currently evaluating a CXXNewAllocator CFGElement. It takes a
      // while to reach the actual CXXNewExpr element from here, so keep the
      // region for later use.
      // Additionally cast the return value of the inlined operator new
      // (which is of type 'void *') to the correct object type.
      SVal AllocV = state->getSVal(CNE, callerCtx);
      AllocV = svalBuilder.evalCast(
          AllocV, CNE->getType(),
          getContext().getPointerType(getContext().VoidTy));

      state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(),
                                         AllocV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which
  // they occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with 0 statement and callee location
    // context, telling it to clean up everything in the callee's context
    // (and its children). We use the callee's function body as a diagnostic
    // statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = nullptr;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();

    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx, &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    if (const CXXNewExpr *CNE = dyn_cast_or_null<CXXNewExpr>(CE)) {
      ExplodedNodeSet DstPostPostCallCallback;
      getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
                                                 CEENode, *UpdatedCall, *this,
                                                 /*WasInlined=*/true);
      for (auto I : DstPostPostCallCallback) {
        getCheckerManager().runCheckersForNewAllocator(
            CNE,
            *getObjectUnderConstruction(I->getState(), CNE,
                                        calleeCtx->getParent()),
            DstPostCall, I, *this,
            /*WasInlined=*/true);
      }
    } else {
      getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                                 *UpdatedCall, *this,
                                                 /*WasInlined=*/true);
    }

    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*WasInlined=*/true);
    } else if (CE &&
               !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
                 AMgr.getAnalyzerOptions().mayInlineCXXAllocator())) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*WasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
         PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                                    bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;
  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();
      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }
      // Do not count the small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      const CFG *CalleeCFG = CalleeADC->getCFG();
      if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both paths -
// one on which we do inline it and the other one on which we don't. This is
// done to ensure we do not drop coverage.
// This is the map from the receiver region to a bool, specifying whether we
// consider this region's information precise or not along the given path.
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
} // end anonymous namespace

REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                                 CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
                                                             unsigned))

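// Sketch of how the trait is consulted (see BifurcateCall() below): the map
// is read with State->get<DynamicDispatchBifurcationMap>(Reg) and updated
// with State->set<DynamicDispatchBifurcationMap>(Reg, Mode), where Mode is
// one of the DynamicDispatchMode values above.
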
bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block &&
      !cast<BlockCall>(Call).isConversionFromLambda()) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currBldrCtx->getBlock(),
                             currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;
  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}

static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return nullptr;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, Call, *this);

  // Actually evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this);

  // Finally, run any post-call checks.
  getCheckerManager().runCheckersForPostCall(Dst, dstCallEvaluated,
                                             Call, *this);
}

ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    SVal ThisV = C->getCXXThisVal();
    ThisV = State->getSVal(ThisV.castAs<Loc>());
    return State->BindExpr(E, LCtx, ThisV);
  }

  SVal R;
  QualType ResultTy = Call.getResultType();
  unsigned Count = currBldrCtx->blockCount();
  if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) {
    // Conjure a temporary if the function returns an object by value.
    SVal Target;
    assert(RTC->getStmt() == Call.getOriginExpr());
    EvalCallOptions CallOpts; // FIXME: We won't really need those.
    std::tie(State, Target) =
        prepareForObjectConstruction(Call.getOriginExpr(), State, LCtx,
                                     RTC->getConstructionContext(), CallOpts);
    assert(Target.getAsRegion());
    // Invalidate the region so that it doesn't look uninitialized. Don't
    // notify the checkers.
    State = State->invalidateRegions(Target.getAsRegion(), E, Count, LCtx,
                                     /* CausedByPointerEscape=*/false, nullptr,
                                     &Call, nullptr);

    R = State->getSVal(Target.castAs<Loc>(), E->getType());
  } else {
    // Conjure a symbol if the return value is unknown.

    // See if we need to conjure a heap pointer instead of
    // a regular unknown pointer.
    bool IsHeapPointer = false;
    if (const auto *CNE = dyn_cast<CXXNewExpr>(E))
      if (CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
        // FIXME: Delegate this to evalCall in MallocChecker?
        IsHeapPointer = true;
      }

    R = IsHeapPointer ? svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count)
                      : svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy,
                                                     Count);
  }
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred,
                                      ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

ExprEngine::CallInlinePolicy
ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
                              AnalyzerOptions &Opts,
                              const ExprEngine::EvalCallOptions &CallOpts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return CIP_DisallowedAlways;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return CIP_DisallowedAlways;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();

    auto CCE = getCurrentCFGElement().getAs<CFGConstructor>();
    const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
                                        : nullptr;

    if (CC && isa<NewAllocatedObjectConstructionContext>(CC) &&
        !Opts.mayInlineCXXAllocator())
      return CIP_DisallowedOnce;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    // Even once we do, we still need to be careful about implicitly-generated
    // initializers for array fields in default move/copy constructors.
    // We still allow construction into ElementRegion targets when they don't
    // represent array elements.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) {
      // If we don't handle temporary destructors, we shouldn't inline
      // their constructors.
      if (CallOpts.IsTemporaryCtorOrDtor &&
          !Opts.includeTemporaryDtorsInCFG())
        return CIP_DisallowedOnce;

      // If we did not find the correct this-region, it would be pointless
      // to inline the constructor. Instead we will simply invalidate
      // the fake temporary target.
      if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
        return CIP_DisallowedOnce;

      // If the temporary is lifetime-extended by binding it to a reference-type
      // field within an aggregate, automatic destructors don't work properly.
      if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate)
        return CIP_DisallowedOnce;
    }

    break;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Allow disabling temporary destructor inlining with a separate option.
    if (CallOpts.IsTemporaryCtorOrDtor && !Opts.mayInlineCXXTemporaryDtors())
      return CIP_DisallowedOnce;

    // If we did not find the correct this-region, it would be pointless
    // to inline the destructor. Instead we will simply invalidate
    // the fake temporary target.
    if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
      return CIP_DisallowedOnce;
    break;
  }
  case CE_CXXAllocator:
    if (Opts.mayInlineCXXAllocator())
      break;
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return CIP_DisallowedAlways;
  case CE_ObjCMessage:
    if (!Opts.mayInlineObjCMethod())
      return CIP_DisallowedAlways;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return CIP_DisallowedAlways;
    break;
  }

  return CIP_Allowed;
}

/// Returns true if the given C++ class contains a member with the given name.
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
                      StringRef Name) {
  const IdentifierInfo &II = Ctx.Idents.get(Name);
  DeclarationName DeclName = Ctx.DeclarationNames.getIdentifier(&II);
  if (!RD->lookup(DeclName).empty())
    return true;

  CXXBasePaths Paths(false, false, false);
  if (RD->lookupInBases(
          [DeclName](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
            return CXXRecordDecl::FindOrdinaryMember(Specifier, Path, DeclName);
          },
          Paths))
    return true;

  return false;
}

/// Returns true if the given C++ class is a container or iterator.
///
/// Our heuristic for this is whether it contains a method named 'begin()' or a
/// nested type named 'iterator' or 'iterator_category'.
static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
  return hasMember(Ctx, RD, "begin") ||
         hasMember(Ctx, RD, "iterator") ||
         hasMember(Ctx, RD, "iterator_category");
}

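// Illustrative example: a class such as
//
//   struct Vec { int *begin(); using iterator = int *; };
//
// is classified as a container by this heuristic, purely on the basis of its
// member names; the analyzer never inspects what the methods actually do.
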
/// Returns true if the given function refers to a method of a C++ container
/// or iterator.
///
/// We generally do a poor job modeling most containers right now, and might
/// prefer not to inline their methods.
static bool isContainerMethod(const ASTContext &Ctx,
                              const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    return isContainerClass(Ctx, MD->getParent());
  return false;
}

/// Returns true if the given function is the destructor of a class named
/// 'shared_ptr'.
static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
  const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
  if (!Dtor)
    return false;

  const CXXRecordDecl *RD = Dtor->getParent();
  if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
    if (II->isStr("shared_ptr"))
      return true;

  return false;
}

/// Returns true if the function in \p CalleeADC may be inlined in general.
///
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
static bool mayInlineDecl(AnalysisManager &AMgr,
                          AnalysisDeclContext *CalleeADC) {
  AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
  // FIXME: Do not inline variadic calls.
  if (CallEvent::isVariadic(CalleeADC->getDecl()))
    return false;

  // Check certain C++-related inlining policies.
  ASTContext &Ctx = CalleeADC->getASTContext();
  if (Ctx.getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
      // Conditionally control the inlining of template functions.
      if (!Opts.mayInlineTemplateFunctions())
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally control the inlining of C++ standard library functions.
      if (!Opts.mayInlineCXXStandardLibrary())
        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
          if (AnalysisDeclContext::isInStdNamespace(FD))
            return false;

      // Conditionally control the inlining of methods on objects that look
      // like C++ containers.
      if (!Opts.mayInlineCXXContainerMethods())
        if (!AMgr.isInCodeFile(FD->getLocation()))
          if (isContainerMethod(Ctx, FD))
            return false;

      // Conditionally control the inlining of the destructor of C++ shared_ptr.
      // We don't currently do a good job modeling shared_ptr because we can't
      // see the reference count, so treating it as opaque is probably the best
      // idea.
      if (!Opts.mayInlineCXXSharedPtrDtor())
        if (isCXXSharedPtrDtor(FD))
          return false;
    }
  }

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline large functions.
  if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
    return false;

  // It is possible that the live variables analysis cannot be
  // run. If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred,
                                  const EvalCallOptions &CallOpts) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline.hasValue()) {
    if (!MayInline.getValue())
      return false;
  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(getAnalysisManager(), CalleeADC)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline.hasValue() || MayInline.getValue());
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  const CFG *CalleeCFG = CalleeADC->getCFG();

  // Do not inline if recursive or we've reached max stack frame count.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      ((CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize())
       || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.getMaxTimesInlineLarge()) &&
      CalleeCFG->getNumBlockIDs() >=
      Opts.getMinCFGSizeTreatFunctionsAsLarge()) {
    NumReachedInlineCountMax++;
    return false;
  }

  if (HowToInline == Inline_Minimal &&
      (CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize()
       || IsRecursive))
    return false;

  return true;
}

static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;
  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;
  return MD->isTrivial();
}

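// Illustrative example: given 'struct S { int x; };', the assignment 'a = b'
// between two S objects uses a trivial copy-assignment operator. Rather than
// inlining a synthesized body, the engine models it as a direct copy of the
// object's value (see performTrivialCopy() used in defaultEvalCall() below).
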
void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate,
                                 const EvalCallOptions &CallOpts) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }

      // We are not bifurcating and we do have a Decl, so just inline.
      if (inlineCall(*Call, D, Bldr, Pred, State))
        return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
        State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline path", keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      if (inlineCall(Call, D, Bldr, Pred, State))
        return;

    // If inlining failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeInlined);
  inlineCall(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
}

void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                   ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}