//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

13 #include "PrettyStackTraceLocationContext.h"
14 #include "clang/AST/CXXInheritance.h"
15 #include "clang/AST/Decl.h"
16 #include "clang/AST/DeclCXX.h"
17 #include "clang/Analysis/Analyses/LiveVariables.h"
18 #include "clang/Analysis/ConstructionContext.h"
19 #include "clang/StaticAnalyzer/Core/CheckerManager.h"
20 #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
21 #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Support/Casting.h"
25 #include "llvm/Support/Compiler.h"
26 #include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace ento;

#define DEBUG_TYPE "ExprEngine"

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE,
                                  ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  PrettyStackTraceLocationContext CrashInfo(calleeCtx);
  const CFGBlock *Entry = CE.getEntry();

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node, notify checkers that analysis of the function has
  // begun, and add the resultant nodes to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew) {
    ExplodedNodeSet DstBegin;
    processBeginOfFunction(BC, Node, DstBegin, Loc);
    Engine.enqueue(DstBegin);
  }
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = nullptr;
  const CFGBlock *Blk = nullptr;
  const StackFrameContext *SF = Node->getStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::make_pair(nullptr, nullptr);

    Node = *Node->pred_begin();
  }

  return std::make_pair(S, Blk);
}

/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
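///
/// Illustrative example (not from this file): if `Base *Base::clone()` is
/// overridden as `Derived *Derived::clone()`, a devirtualized call through a
/// `Base *` produces a `Derived *`, which must be cast back to `Base *` to
/// match the type of the call expression.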
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!V.getAs<Loc>())
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that this never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}

void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
                                           ExplodedNode *Pred,
                                           ExplodedNodeSet &Dst) {
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(Pred);
  if (!Blk || !LastSt) {
    Dst.Add(Pred);
    return;
  }

  // Here, we destroy the current location context. We use the current
  // function's entire body as a diagnostic statement, with which the program
  // point will be associated. However, we only want to use LastStmt as a
  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
  // is dead.
  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
  const LocationContext *LCtx = Pred->getLocationContext();
  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
             LCtx->getAnalysisDeclContext()->getBody(),
             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
}

static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
    const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of the call exit sequence)
/// 2. Bind the return value
/// 3. Run remove dead bindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
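///
/// Illustrative trace (simplified): for an inlined call `int y = foo();`, the
/// engine emits CallExitBegin in foo()'s frame, binds foo()'s return value to
/// the CallExpr, purges symbols that die with the callee frame, emits
/// CallExitEnd in the caller's frame, and finally runs the PostStmt<CallExpr>
/// checks before resuming the caller's block.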
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.
  PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
  const StackFrameContext *calleeCtx = CEBNode->getStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }
    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);
      ThisV = state->getSVal(ThisV.castAs<Loc>());
      state = state->BindExpr(CCE, callerCtx, ThisV);
    }

    if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
      // We are currently evaluating a CXXNewAllocator CFGElement. It takes a
      // while to reach the actual CXXNewExpr element from here, so keep the
      // region for later use.
      // Additionally cast the return value of the inlined operator new
      // (which is of type 'void *') to the correct object type.
      SVal AllocV = state->getSVal(CNE, callerCtx);
      AllocV = svalBuilder.evalCast(
          AllocV, CNE->getType(),
          getContext().getPointerType(getContext().VoidTy));

      state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(),
                                         AllocV);
    }
  }
  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which
  // they occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with 0 statement and callee location
    // context, telling it to clean up everything in the callee's context
    // (and its children). We use the callee's function body as a diagnostic
    // statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = nullptr;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();

    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx, &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) {
      ExplodedNodeSet DstPostPostCallCallback;
      getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
                                                 CEENode, *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
      for (ExplodedNode *I : DstPostPostCallCallback) {
        getCheckerManager().runCheckersForNewAllocator(
            cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this,
            /*wasInlined=*/true);
      }
    } else {
      getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                                 *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
    }

    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*wasInlined=*/true);
    } else if (CE &&
               !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
                 AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*wasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
         PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const {
  // When there are no branches in the function, it means that there's no
  // exponential complexity introduced by inlining such a function.
  // Such functions also don't trigger various fundamental problems
  // with our inlining mechanism, such as the problem of
  // inlined defensive checks. Hence isLinear().
  const CFG *Cfg = ADC->getCFG();
  return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
}

bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge;
}

bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize;
}

void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                                    bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;
  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();
      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }
      // Do not count the small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      if (!isSmall(CalleeADC))
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both paths -
// one on which we do inline it and the other one on which we don't. This is
// done to ensure we do not drop coverage.
// This is the map from the receiver region to a bool, specifying whether we
// consider this region's information precise or not along the given path.
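//
// For example (illustrative): for a message whose receiver might be any of
// several subclasses, one path inlines the statically found definition and
// records the receiver region as DynamicDispatchModeInlined, while the
// sibling path evaluates the call conservatively and records
// DynamicDispatchModeConservative.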
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
} // end anonymous namespace

REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                               const MemRegion *, unsigned)
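// The registered trait is consulted and updated in ExprEngine::BifurcateCall()
// below, via State->get<DynamicDispatchBifurcationMap>(Reg) and
// State->set<DynamicDispatchBifurcationMap>(Reg, Mode).
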
bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block &&
      !cast<BlockCall>(Call).isConversionFromLambda()) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
      CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(),
                               currBldrCtx->blockCount(), currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;
  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}

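// If the analyzer previously decided to abandon inlining of this call site,
// the ReplayWithoutInlining trait holds the call expression being replayed.
// Consume the marker and return the updated state so the call is evaluated
// conservatively this time; return null if no replay is in progress.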
static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return nullptr;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
                                                       const CallEvent &Call) {
  const Expr *E = Call.getOriginExpr();
  // FIXME: Constructors to placement arguments of operator new
  // are not supported yet.
  if (!E || isa<CXXNewExpr>(E))
    return State;

  const LocationContext *LC = Call.getLocationContext();
  for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
    unsigned I = Call.getASTArgumentIndex(CallI);
    if (Optional<SVal> V =
            getObjectUnderConstruction(State, {E, I}, LC)) {
      SVal VV = *V;
      (void)VV;
      assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion())
                 ->getStackFrame()->getParent()
                 ->getStackFrame() == LC->getStackFrame());
      State = finishObjectConstruction(State, {E, I}, LC);
    }
  }

  return State;
}

void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
                                            ExplodedNode *Pred,
                                            const CallEvent &Call) {
  ProgramStateRef State = Pred->getState();
  ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
  if (CleanedState == State) {
    Dst.insert(Pred);
    return;
  }

  const Expr *E = Call.getOriginExpr();
  const LocationContext *LC = Call.getLocationContext();
  NodeBuilder B(Pred, Dst, *currBldrCtx);
  static SimpleProgramPointTag Tag("ExprEngine",
                                   "Finish argument construction");
  PreStmt PP(E, LC, &Tag);
  B.generateNode(PP, CleanedState, Pred);
}

void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
                                            Call, *this);

  // Actually evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this, EvalCallOptions());

  // If there were other constructors called for object-type arguments
  // of this call, clean them up.
  ExplodedNodeSet dstArgumentCleanup;
  for (ExplodedNode *I : dstCallEvaluated)
    finishArgumentConstruction(dstArgumentCleanup, I, Call);

  ExplodedNodeSet dstPostCall;
  getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
                                             Call, *this);

  // Handle escaping symbols conjured during the invalidation of regions above.
  // Note that, for inlined calls the nodes were put back into the worklist,
  // so we can assume that every node belongs to a conservative call at this
  // point.

  // Run pointerEscape callback with the newly conjured symbols.
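  // Each non-const pointer or reference argument may have been written to by
  // the conservatively evaluated callee, so the value stored through it is
  // treated as escaping in the loop below.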
  SmallVector<std::pair<SVal, SVal>, 8> Escaped;
  for (ExplodedNode *I : dstPostCall) {
    NodeBuilder B(I, Dst, *currBldrCtx);
    ProgramStateRef State = I->getState();
    Escaped.clear();
    {
      unsigned Arg = -1;
      for (const ParmVarDecl *PVD : Call.parameters()) {
        ++Arg;
        QualType ParamTy = PVD->getType();
        if (ParamTy.isNull() ||
            (!ParamTy->isPointerType() && !ParamTy->isReferenceType()))
          continue;
        QualType Pointee = ParamTy->getPointeeType();
        if (Pointee.isConstQualified() || Pointee->isVoidType())
          continue;
        if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
          Escaped.emplace_back(loc::MemRegionVal(MR),
                               State->getSVal(MR, Pointee));
      }
    }

    State = processPointerEscapedOnBind(State, Escaped,
                                        I->getLocationContext(),
                                        PSK_EscapeOutParameters, &Call);

    if (State == I->getState())
      Dst.insert(I);
    else
      B.generateNode(I->getLocation(), State, I);
  }
}

ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    SVal ThisV = C->getCXXThisVal();
    ThisV = State->getSVal(ThisV.castAs<Loc>());
    return State->BindExpr(E, LCtx, ThisV);
  }

  SVal R;
  QualType ResultTy = Call.getResultType();
  unsigned Count = currBldrCtx->blockCount();
  if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) {
    // Conjure a temporary if the function returns an object by value.
    SVal Target;
    assert(RTC->getStmt() == Call.getOriginExpr());
    EvalCallOptions CallOpts; // FIXME: We won't really need those.
    std::tie(State, Target) =
        handleConstructionContext(Call.getOriginExpr(), State, LCtx,
                                  RTC->getConstructionContext(), CallOpts);
    const MemRegion *TargetR = Target.getAsRegion();
    assert(TargetR);
    // Invalidate the region so that it doesn't look uninitialized. If this is
    // a field or element constructor, we do not want to invalidate
    // the whole structure. Pointer escape is meaningless because
    // the structure is a product of conservative evaluation
    // and therefore contains nothing interesting at this point.
    RegionAndSymbolInvalidationTraits ITraits;
    ITraits.setTrait(TargetR,
        RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
    State = State->invalidateRegions(TargetR, E, Count, LCtx,
                                     /* CausesPointerEscape=*/false, nullptr,
                                     &Call, &ITraits);

    R = State->getSVal(Target.castAs<Loc>(), E->getType());
  } else {
    // Conjure a symbol if the return value is unknown.

    // See if we need to conjure a heap pointer instead of
    // a regular unknown pointer.
    bool IsHeapPointer = false;
    if (const auto *CNE = dyn_cast<CXXNewExpr>(E))
      if (CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
        // FIXME: Delegate this to evalCall in MallocChecker?
        IsHeapPointer = true;
      }

    R = IsHeapPointer ? svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count)
                      : svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy,
                                                     Count);
  }
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred,
                                      ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

ExprEngine::CallInlinePolicy
ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
                              AnalyzerOptions &Opts,
                              const EvalCallOptions &CallOpts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return CIP_DisallowedAlways;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return CIP_DisallowedAlways;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();

    auto CCE = getCurrentCFGElement().getAs<CFGConstructor>();
    const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
                                        : nullptr;

    if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) &&
        !Opts.MayInlineCXXAllocator)
      return CIP_DisallowedOnce;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    // Even once we do, we still need to be careful about implicitly-generated
    // initializers for array fields in default move/copy constructors.
    // We still allow construction into ElementRegion targets when they don't
    // represent array elements.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the
    // constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) {
      // If we don't handle temporary destructors, we shouldn't inline
      // their constructors.
      if (CallOpts.IsTemporaryCtorOrDtor &&
          !Opts.ShouldIncludeTemporaryDtorsInCFG)
        return CIP_DisallowedOnce;

      // If we did not find the correct this-region, it would be pointless
      // to inline the constructor. Instead we will simply invalidate
      // the fake temporary target.
      if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
        return CIP_DisallowedOnce;

      // If the temporary is lifetime-extended by binding it to a
      // reference-type field within an aggregate, automatic destructors
      // don't work properly.
      if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate)
        return CIP_DisallowedOnce;
    }

    break;
  }
  case CE_CXXInheritedConstructor: {
    // This doesn't really increase the cost of inlining ever, because
    // the stack frame of the inherited constructor is trivial.
    return CIP_Allowed;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Allow disabling temporary destructor inlining with a separate option.
    if (CallOpts.IsTemporaryCtorOrDtor && !Opts.MayInlineCXXTemporaryDtors)
      return CIP_DisallowedOnce;

    // If we did not find the correct this-region, it would be pointless
    // to inline the destructor. Instead we will simply invalidate
    // the fake temporary target.
    if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
      return CIP_DisallowedOnce;
    break;
  }
  case CE_CXXDeallocator:
    LLVM_FALLTHROUGH;
  case CE_CXXAllocator:
    if (Opts.MayInlineCXXAllocator)
      break;
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return CIP_DisallowedAlways;
  case CE_ObjCMessage:
    if (!Opts.MayInlineObjCMethod)
      return CIP_DisallowedAlways;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return CIP_DisallowedAlways;
    break;
  }

  return CIP_Allowed;
}

/// Returns true if the given C++ class contains a member with the given name.
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
                      StringRef Name) {
  const IdentifierInfo &II = Ctx.Idents.get(Name);
  DeclarationName DeclName = Ctx.DeclarationNames.getIdentifier(&II);
  if (!RD->lookup(DeclName).empty())
    return true;

  CXXBasePaths Paths(false, false, false);
  if (RD->lookupInBases(
          [DeclName](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
            return CXXRecordDecl::FindOrdinaryMember(Specifier, Path, DeclName);
          },
          Paths))
    return true;

  return false;
}

/// Returns true if the given C++ class is a container or iterator.
///
/// Our heuristic for this is whether it contains a method named 'begin()' or a
/// nested type named 'iterator' or 'iterator_category'.
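/// For example, an STL-style container typically defines begin(), and an
/// STL-style iterator class typically defines a nested iterator_category
/// type, so both are matched by this heuristic.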
static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
  return hasMember(Ctx, RD, "begin") ||
         hasMember(Ctx, RD, "iterator") ||
         hasMember(Ctx, RD, "iterator_category");
}

/// Returns true if the given function refers to a method of a C++ container
/// or iterator.
///
/// We generally do a poor job modeling most containers right now, and might
/// prefer not to inline their methods.
static bool isContainerMethod(const ASTContext &Ctx,
                              const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    return isContainerClass(Ctx, MD->getParent());
  return false;
}

/// Returns true if the given function is the destructor of a class named
/// "shared_ptr".
static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
  const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
  if (!Dtor)
    return false;
  const CXXRecordDecl *RD = Dtor->getParent();
  if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
    if (II->isStr("shared_ptr"))
      return true;
  return false;
}

/// Returns true if the function in \p CalleeADC may be inlined in general.
///
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const {
  AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
  // FIXME: Do not inline variadic calls.
  if (CallEvent::isVariadic(CalleeADC->getDecl()))
    return false;

  // Check certain C++-related inlining policies.
  ASTContext &Ctx = CalleeADC->getASTContext();
  if (Ctx.getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
      // Conditionally control the inlining of template functions.
      if (!Opts.MayInlineTemplateFunctions)
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally control the inlining of C++ standard library functions.
      if (!Opts.MayInlineCXXStandardLibrary)
        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
          if (AnalysisDeclContext::isInStdNamespace(FD))
            return false;

      // Conditionally control the inlining of methods on objects that look
      // like C++ containers.
      if (!Opts.MayInlineCXXContainerMethods)
        if (!AMgr.isInCodeFile(FD->getLocation()))
          if (isContainerMethod(Ctx, FD))
            return false;

      // Conditionally control the inlining of the destructor of C++ shared_ptr.
      // We don't currently do a good job modeling shared_ptr because we can't
      // see the reference count, so treating it as opaque is probably the best
      // idea.
      if (!Opts.MayInlineCXXSharedPtrDtor)
        if (isCXXSharedPtrDtor(FD))
          return false;
    }
  }

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline large functions.
  if (isHuge(CalleeADC))
    return false;

  // It is possible that the live variables analysis cannot be
  // run. If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred,
                                  const EvalCallOptions &CallOpts) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline.hasValue()) {
    if (!MayInline.getValue())
      return false;
  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(CalleeADC)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline.hasValue() || MayInline.getValue());
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  // Do not inline if recursive or we've reached max stack frame count.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      (!isSmall(CalleeADC) || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.MaxTimesInlineLarge) &&
      isLarge(CalleeADC)) {
    NumReachedInlineCountMax++;
    return false;
  }

  if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
    return false;

  return true;
}

static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;
  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;
  return MD->isTrivial();
}

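// Default (checker-independent) strategy for evaluating a call, as
// implemented below: trivial copy/move assignments are modeled directly;
// otherwise we try to inline the callee, possibly bifurcating the path on
// imprecise dynamic dispatch, and fall back to conservative invalidation
// whenever inlining is not possible.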
void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate,
                                 const EvalCallOptions &CallOpts) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }

      // We are not bifurcating and we do have a Decl, so just inline.
      if (inlineCall(*Call, D, Bldr, Pred, State))
        return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
                        State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline path", keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      if (inlineCall(Call, D, Bldr, Pred, State))
        return;
    // If inlining failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeInlined);
  inlineCall(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
}

void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                   ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}