//===- TailRecursionElimination.cpp - Eliminate Tail Calls ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file transforms calls of the current function (self recursion) followed
// by a return instruction with a branch to the entry of the function, creating
// a loop. This pass also implements the following extensions to the basic
// algorithm:
//
//  1. Trivial instructions between the call and return do not prevent the
//     transformation from taking place, though currently the analysis cannot
//     support moving any really useful instructions (only dead ones).
//  2. This pass transforms functions that are prevented from being tail
//     recursive by an associative and commutative expression to use an
//     accumulator variable, thus compiling the typical naive factorial or
//     'fib' implementation into efficient code (see the sketch below).
//  3. TRE is performed if the function returns void, if the return
//     returns the result returned by the call, or if the function returns a
//     run-time constant on all exits from the function. It is possible, though
//     unlikely, that the return returns something else (like constant 0), and
//     can still be TRE'd. It can be TRE'd if ALL OTHER return instructions in
//     the function return the exact same value.
//  4. If it can prove that callees do not access their caller stack frame,
//     they are marked as eligible for tail call elimination (by the code
//     generator).
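//
// A sketch of extension 2 in C (illustrative only, not a test case): the
// naive recursion on the left is, in effect, compiled into the loop on the
// right once the multiply is rewritten to use an accumulator:
//
//   int fact(int n) {              int fact(int n) {
//     if (n <= 1) return 1;          int acc = 1;
//     return n * fact(n - 1);        while (n > 1) acc *= n--;
//   }                                return acc;
//                                  }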
//
// There are several improvements that could be made:
//
//  1. If the function has any alloca instructions, these instructions will be
//     moved out of the entry block of the function, causing them to be
//     evaluated each time through the tail recursion. Safely keeping allocas
//     in the entry block requires analysis to prove that the tail-called
//     function does not read or write the stack object.
//  2. Tail recursion is only performed if the call immediately precedes the
//     return instruction. It's possible that there could be a jump between
//     the call and the return.
//  3. There can be intervening operations between the call and the return that
//     prevent the TRE from occurring. For example, there could be GEP's and
//     stores to memory that will not be read or written by the call. This
//     requires some substantial analysis (such as with DSA) to prove safe to
//     move ahead of the call, but doing so could allow many more TREs to be
//     performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
//  4. The algorithm we use to detect if callees access their caller stack
//     frames is very primitive.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/TailRecursionElimination.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;

#define DEBUG_TYPE "tailcallelim"

STATISTIC(NumEliminated, "Number of tail calls removed");
STATISTIC(NumRetDuped, "Number of returns duplicated");
STATISTIC(NumAccumAdded, "Number of accumulators introduced");

/// Scan the specified function for alloca instructions.
/// If it contains any dynamic allocas, returns false.
static bool canTRE(Function &F) {
  // Because of PR962, we don't TRE dynamic allocas.
  return llvm::all_of(instructions(F), [](Instruction &I) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    return !AI || AI->isStaticAlloca();
  });
}
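
// For example (a sketch; 'use' is an arbitrary placeholder): a C function like
//   void f(int n) { int vla[n]; use(vla); if (n) f(n - 1); }
// lowers 'vla' to a dynamic alloca, so canTRE() returns false for it, whereas
// fixed-size locals become static allocas in the entry block and are fine.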

namespace {
struct AllocaDerivedValueTracker {
  // Start at a root value and walk its use-def chain to mark calls that use
  // the value or a derived value in AllocaUsers, and places where it may
  // escape in EscapePoints.
  void walk(Value *Root) {
    SmallVector<Use *, 32> Worklist;
    SmallPtrSet<Use *, 32> Visited;

    auto AddUsesToWorklist = [&](Value *V) {
      for (auto &U : V->uses()) {
        if (!Visited.insert(&U).second)
          continue;
        Worklist.push_back(&U);
      }
    };

    AddUsesToWorklist(Root);

    while (!Worklist.empty()) {
      Use *U = Worklist.pop_back_val();
      Instruction *I = cast<Instruction>(U->getUser());

      switch (I->getOpcode()) {
      case Instruction::Call:
      case Instruction::Invoke: {
        auto &CB = cast<CallBase>(*I);
        // If the alloca-derived argument is passed byval it is not an escape
        // point, or a use of an alloca. Calling with byval copies the contents
        // of the alloca into argument registers or stack slots, which exist
        // beyond the lifetime of the current frame.
        if (CB.isArgOperand(U) && CB.isByValArgument(CB.getArgOperandNo(U)))
          continue;

        bool IsNocapture =
            CB.isDataOperand(U) && CB.doesNotCapture(CB.getDataOperandNo(U));
        callUsesLocalStack(CB, IsNocapture);
        if (IsNocapture) {
          // If the alloca-derived argument is passed in as nocapture, then it
          // can't propagate to the call's return. That would be capturing.
          continue;
        }
        break;
      }
      case Instruction::Load: {
        // The result of a load is not alloca-derived (unless an alloca has
        // otherwise escaped, but this is a local analysis).
        continue;
      }
      case Instruction::Store: {
        if (U->getOperandNo() == 0)
          EscapePoints.insert(I);
        continue; // Stores have no users to analyze.
      }
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::PHI:
      case Instruction::Select:
      case Instruction::AddrSpaceCast:
        break;
      default:
        EscapePoints.insert(I);
        break;
      }

      AddUsesToWorklist(I);
    }
  }

  void callUsesLocalStack(CallBase &CB, bool IsNocapture) {
    // Add it to the list of alloca users.
    AllocaUsers.insert(&CB);

    // If it's nocapture then it can't capture this alloca.
    if (IsNocapture)
      return;

    // If it can write to memory, it can leak the alloca value.
    if (!CB.onlyReadsMemory())
      EscapePoints.insert(&CB);
  }

  SmallPtrSet<Instruction *, 32> AllocaUsers;
  SmallPtrSet<Instruction *, 32> EscapePoints;
};
} // namespace

static bool markTails(Function &F, bool &AllCallsAreTailCalls,
                      OptimizationRemarkEmitter *ORE) {
  if (F.callsFunctionThatReturnsTwice())
    return false;
  AllCallsAreTailCalls = true;

  // The local stack holds all alloca instructions and all byval arguments.
  AllocaDerivedValueTracker Tracker;
  for (Argument &Arg : F.args()) {
    if (Arg.hasByValAttr())
      Tracker.walk(&Arg);
  }
  for (auto &BB : F) {
    for (auto &I : BB)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
        Tracker.walk(AI);
  }

  bool Modified = false;

  // Track whether a block is reachable after an alloca has escaped. Blocks
  // that contain the escaping instruction will be marked as being visited
  // without an escaped alloca, since that is how the block began.
  enum VisitType {
    UNVISITED,
    UNESCAPED,
    ESCAPED
  };
  DenseMap<BasicBlock *, VisitType> Visited;

  // We propagate the fact that an alloca has escaped from block to successor.
  // Visit the blocks that are propagating the escapedness first. To do this,
  // we maintain two worklists.
  SmallVector<BasicBlock *, 32> WorklistUnescaped, WorklistEscaped;

  // We may enter a block and visit it thinking that no alloca has escaped yet,
  // then see an escape point and go back around a loop edge and come back to
  // the same block twice. Because of this, we defer setting tail on calls when
  // we first encounter them in a block. Every entry in this list does not
  // statically use an alloca via use-def chain analysis, but may find an
  // alloca through other means if the block turns out to be reachable after an
  // escape point.
  SmallVector<CallInst *, 32> DeferredTails;

  BasicBlock *BB = &F.getEntryBlock();
  VisitType Escaped = UNESCAPED;
  do {
    for (auto &I : *BB) {
      if (Tracker.EscapePoints.count(&I))
        Escaped = ESCAPED;

      CallInst *CI = dyn_cast<CallInst>(&I);
      if (!CI || CI->isTailCall() || isa<DbgInfoIntrinsic>(&I))
        continue;

      bool IsNoTail = CI->isNoTailCall() || CI->hasOperandBundles();

      if (!IsNoTail && CI->doesNotAccessMemory()) {
        // A call to a readnone function whose arguments are all things
        // computed outside this function can be marked tail. Even if you
        // stored the alloca address into a global, a readnone function can't
        // load the global anyhow.
        //
        // Note that this runs whether we know an alloca has escaped or not. If
        // it has, then we can't trust Tracker.AllocaUsers to be accurate.
        bool SafeToTail = true;
        for (auto &Arg : CI->arg_operands()) {
          if (isa<Constant>(Arg.getUser()))
            continue;
          if (Argument *A = dyn_cast<Argument>(Arg.getUser()))
            if (!A->hasByValAttr())
              continue;
          SafeToTail = false;
          break;
        }
        if (SafeToTail) {
          ORE->emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "tailcall-readnone", CI)
                   << "marked as tail call candidate (readnone)";
          });
          CI->setTailCall();
          Modified = true;
          continue;
        }
      }

      if (!IsNoTail && Escaped == UNESCAPED && !Tracker.AllocaUsers.count(CI)) {
        DeferredTails.push_back(CI);
      } else {
        AllCallsAreTailCalls = false;
      }
    }

    for (auto *SuccBB : make_range(succ_begin(BB), succ_end(BB))) {
      auto &State = Visited[SuccBB];
      if (State < Escaped) {
        State = Escaped;
        if (State == ESCAPED)
          WorklistEscaped.push_back(SuccBB);
        else
          WorklistUnescaped.push_back(SuccBB);
      }
    }

    if (!WorklistEscaped.empty()) {
      BB = WorklistEscaped.pop_back_val();
      Escaped = ESCAPED;
    } else {
      BB = nullptr;
      while (!WorklistUnescaped.empty()) {
        auto *NextBB = WorklistUnescaped.pop_back_val();
        if (Visited[NextBB] == UNESCAPED) {
          BB = NextBB;
          Escaped = UNESCAPED;
          break;
        }
      }
    }
  } while (BB);

  for (CallInst *CI : DeferredTails) {
    if (Visited[CI->getParent()] != ESCAPED) {
      // If the escape point was part way through the block, calls after the
      // escape point wouldn't have been put into DeferredTails.
      LLVM_DEBUG(dbgs() << "Marked as tail call candidate: " << *CI << "\n");
      CI->setTailCall();
      Modified = true;
    } else {
      AllCallsAreTailCalls = false;
    }
  }

  return Modified;
}

/// Return true if it is safe to move the specified
/// instruction from after the call to before the call, assuming that all
/// instructions between the call and this instruction are movable.
///
static bool canMoveAboveCall(Instruction *I, CallInst *CI, AliasAnalysis *AA) {
  // FIXME: We can move load/store/call/free instructions above the call if the
  // call does not mod/ref the memory location being processed.
  if (I->mayHaveSideEffects()) // This also handles volatile loads.
    return false;

  if (LoadInst *L = dyn_cast<LoadInst>(I)) {
    // Loads may always be moved above calls without side effects.
    if (CI->mayHaveSideEffects()) {
      // Non-volatile loads may be moved above a call with side effects if it
      // does not write to memory and the load provably won't trap.
      // Writes to memory only matter if they may alias the pointer
      // being loaded from.
      const DataLayout &DL = L->getModule()->getDataLayout();
      if (isModSet(AA->getModRefInfo(CI, MemoryLocation::get(L))) ||
          !isSafeToLoadUnconditionally(L->getPointerOperand(), L->getType(),
                                       L->getAlign(), DL, L))
        return false;
    }
  }

  // Otherwise, if this is a side-effect free instruction, check to make sure
  // that it does not use the return value of the call. If it doesn't use the
  // return value of the call, it must only use things that are defined before
  // the call, or movable instructions between the call and the instruction
  // we are checking for (like this one).
  return !is_contained(I->operands(), CI);
}
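
// For example (a sketch): in
//   %r = call i32 @f(i32 %x)
//   %l = load i32, i32* @g
//   ret i32 %r
// the load can be hoisted above the call when @f doesn't write memory that
// may alias @g and the load provably can't trap, leaving the call adjacent
// to the return.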

static bool canTransformAccumulatorRecursion(Instruction *I, CallInst *CI) {
  if (!I->isAssociative() || !I->isCommutative())
    return false;

  assert(I->getNumOperands() == 2 &&
         "Associative/commutative operations should have 2 args!");

  // Exactly one operand should be the result of the call instruction.
  if ((I->getOperand(0) == CI && I->getOperand(1) == CI) ||
      (I->getOperand(0) != CI && I->getOperand(1) != CI))
    return false;

  // The only user of this instruction we allow is a single return instruction.
  if (!I->hasOneUse() || !isa<ReturnInst>(I->user_back()))
    return false;

  return true;
}
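
// For example (a sketch): in
//   int fact(int n) { return n <= 1 ? 1 : n * fact(n - 1); }
// the multiply between the recursive call and the return is associative and
// commutative, has the call result as exactly one operand, and feeds only the
// return, so it qualifies as the accumulator instruction.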

static Instruction *firstNonDbg(BasicBlock::iterator I) {
  while (isa<DbgInfoIntrinsic>(I))
    ++I;
  return &*I;
}

namespace {
class TailRecursionEliminator {
  Function &F;
  const TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  OptimizationRemarkEmitter *ORE;
  DomTreeUpdater &DTU;

  // The below are shared state we want to have available when eliminating any
  // calls in the function. These values should be populated by
  // createTailRecurseLoopHeader the first time we find a call we can eliminate.
  BasicBlock *HeaderBB = nullptr;
  SmallVector<PHINode *, 8> ArgumentPHIs;
  bool RemovableCallsMustBeMarkedTail = false;

  // PHI node to store our return value.
  PHINode *RetPN = nullptr;

  // i1 PHI node to track if we have a valid return value stored in RetPN.
  PHINode *RetKnownPN = nullptr;

  // Vector of select instructions we inserted. These selects use RetKnownPN
  // to either propagate RetPN or select a new return value.
  SmallVector<SelectInst *, 8> RetSelects;

  // The below are shared state needed when performing accumulator recursion.
  // These values should be populated by insertAccumulator the first time we
  // find an elimination that requires an accumulator.

  // PHI node to store our current accumulated value.
  PHINode *AccPN = nullptr;

  // The instruction doing the accumulating.
  Instruction *AccumulatorRecursionInstr = nullptr;

  TailRecursionEliminator(Function &F, const TargetTransformInfo *TTI,
                          AliasAnalysis *AA, OptimizationRemarkEmitter *ORE,
                          DomTreeUpdater &DTU)
      : F(F), TTI(TTI), AA(AA), ORE(ORE), DTU(DTU) {}

  CallInst *findTRECandidate(Instruction *TI,
                             bool CannotTailCallElimCallsMarkedTail);

  void createTailRecurseLoopHeader(CallInst *CI);

  void insertAccumulator(Instruction *AccRecInstr);

  bool eliminateCall(CallInst *CI);

  bool foldReturnAndProcessPred(ReturnInst *Ret,
                                bool CannotTailCallElimCallsMarkedTail);

  bool processReturningBlock(ReturnInst *Ret,
                             bool CannotTailCallElimCallsMarkedTail);

  void cleanupAndFinalize();

public:
  static bool eliminate(Function &F, const TargetTransformInfo *TTI,
                        AliasAnalysis *AA, OptimizationRemarkEmitter *ORE,
                        DomTreeUpdater &DTU);
};
} // namespace

CallInst *TailRecursionEliminator::findTRECandidate(
    Instruction *TI, bool CannotTailCallElimCallsMarkedTail) {
  BasicBlock *BB = TI->getParent();

  if (&BB->front() == TI) // Make sure there is something before the terminator.
    return nullptr;

  // Scan backwards from the return, checking to see if there is a tail call in
  // this block. If so, set CI to it.
  CallInst *CI = nullptr;
  BasicBlock::iterator BBI(TI);
  while (true) {
    CI = dyn_cast<CallInst>(BBI);
    if (CI && CI->getCalledFunction() == &F)
      break;

    if (BBI == BB->begin())
      return nullptr; // Didn't find a potential tail call.
    --BBI;
  }

  // If this call is marked as a tail call, and if there are dynamic allocas in
  // the function, we cannot perform this optimization.
  if (CI->isTailCall() && CannotTailCallElimCallsMarkedTail)
    return nullptr;

  // As a special case, detect code like this:
  //   double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
  // and disable this xform in this case, because the code generator will
  // lower the call to fabs into inline code.
  if (BB == &F.getEntryBlock() &&
      firstNonDbg(BB->front().getIterator()) == CI &&
      firstNonDbg(std::next(BB->begin())) == TI && CI->getCalledFunction() &&
      !TTI->isLoweredToCall(CI->getCalledFunction())) {
    // A single-block function with just a call and a return. Check that
    // the arguments match.
    auto I = CI->arg_begin(), E = CI->arg_end();
    Function::arg_iterator FI = F.arg_begin(), FE = F.arg_end();
    for (; I != E && FI != FE; ++I, ++FI)
      if (*I != &*FI) break;
    if (I == E && FI == FE)
      return nullptr;
  }

  return CI;
}

void TailRecursionEliminator::createTailRecurseLoopHeader(CallInst *CI) {
  HeaderBB = &F.getEntryBlock();
  BasicBlock *NewEntry = BasicBlock::Create(F.getContext(), "", &F, HeaderBB);
  NewEntry->takeName(HeaderBB);
  HeaderBB->setName("tailrecurse");
  BranchInst *BI = BranchInst::Create(HeaderBB, NewEntry);
  BI->setDebugLoc(CI->getDebugLoc());

  // If this function has self recursive calls in the tail position where some
  // are marked tail and some are not, only transform one flavor or another.
  // We have to choose whether we move allocas in the entry block to the new
  // entry block or not, so we can't make a good choice for both. We make this
  // decision here based on whether the first call we found to remove is
  // marked tail.
  // NOTE: We could do slightly better here in the case that the function has
  // no entry block allocas.
  RemovableCallsMustBeMarkedTail = CI->isTailCall();

  // If this tail call is marked 'tail' and if there are any allocas in the
  // entry block, move them up to the new entry block.
  if (RemovableCallsMustBeMarkedTail)
    // Move all fixed sized allocas from HeaderBB to NewEntry.
    for (BasicBlock::iterator OEBI = HeaderBB->begin(), E = HeaderBB->end(),
                              NEBI = NewEntry->begin();
         OEBI != E;)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(OEBI++))
        if (isa<ConstantInt>(AI->getArraySize()))
          AI->moveBefore(&*NEBI);

  // Now that we have created a new block, which jumps to the entry
  // block, insert a PHI node for each argument of the function.
  // For now, we initialize each PHI to only have the real arguments
  // which are passed in.
  Instruction *InsertPos = &HeaderBB->front();
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    PHINode *PN =
        PHINode::Create(I->getType(), 2, I->getName() + ".tr", InsertPos);
    I->replaceAllUsesWith(PN); // Everyone use the PHI node now!
    PN->addIncoming(&*I, NewEntry);
    ArgumentPHIs.push_back(PN);
  }

  // If the function doesn't return void, create the RetPN and RetKnownPN PHI
  // nodes to track our return value. We initialize RetPN with undef and
  // RetKnownPN with false since we can't know our return value at function
  // entry.
  Type *RetType = F.getReturnType();
  if (!RetType->isVoidTy()) {
    Type *BoolType = Type::getInt1Ty(F.getContext());
    RetPN = PHINode::Create(RetType, 2, "ret.tr", InsertPos);
    RetKnownPN = PHINode::Create(BoolType, 2, "ret.known.tr", InsertPos);

    RetPN->addIncoming(UndefValue::get(RetType), NewEntry);
    RetKnownPN->addIncoming(ConstantInt::getFalse(BoolType), NewEntry);
  }

  // The entry block was changed from HeaderBB to NewEntry.
  // The forward DominatorTree needs to be recalculated when the EntryBB is
  // changed. In this corner-case we recalculate the entire tree.
  DTU.recalculate(*NewEntry->getParent());
}
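
// After this runs on 'int f(int n)', the prologue looks roughly like this
// (a sketch; block and value names follow the code above, and %rec.arg /
// %some.pred stand for the argument and block of each eliminated call, which
// eliminateCall fills in later):
//
//   entry:
//     br label %tailrecurse
//
//   tailrecurse:                 ; the old entry block, now the loop header
//     %n.tr = phi i32 [ %n, %entry ], [ %rec.arg, %some.pred ]
//     %ret.tr = phi i32 [ undef, %entry ], ...
//     %ret.known.tr = phi i1 [ false, %entry ], ...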

void TailRecursionEliminator::insertAccumulator(Instruction *AccRecInstr) {
  assert(!AccPN && "Trying to insert multiple accumulators");

  AccumulatorRecursionInstr = AccRecInstr;

  // Start by inserting a new PHI node for the accumulator.
  pred_iterator PB = pred_begin(HeaderBB), PE = pred_end(HeaderBB);
  AccPN = PHINode::Create(F.getReturnType(), std::distance(PB, PE) + 1,
                          "accumulator.tr", &HeaderBB->front());

  // Loop over all of the predecessors of the tail recursion block. For the
  // real entry into the function we seed the PHI with the identity constant
  // for the accumulation operation. For any other existing branches to this
  // block (due to other tail recursions eliminated) the accumulator is not
  // modified. Because we haven't added the branch in the current block to
  // HeaderBB yet, it will not show up as a predecessor.
  for (pred_iterator PI = PB; PI != PE; ++PI) {
    BasicBlock *P = *PI;
    if (P == &F.getEntryBlock()) {
      Constant *Identity = ConstantExpr::getBinOpIdentity(
          AccRecInstr->getOpcode(), AccRecInstr->getType());
      AccPN->addIncoming(Identity, P);
    } else {
      AccPN->addIncoming(AccPN, P);
    }
  }

  ++NumAccumAdded;
}
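
// ConstantExpr::getBinOpIdentity supplies the seed value: for example, 1 for
// 'mul' and 0 for 'add', so the first trip through the loop leaves the
// accumulator equal to the first accumulated value.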

bool TailRecursionEliminator::eliminateCall(CallInst *CI) {
  ReturnInst *Ret = cast<ReturnInst>(CI->getParent()->getTerminator());

  // Ok, we found a potential tail call. We can currently only transform the
  // tail call if all of the instructions between the call and the return are
  // movable to above the call itself, leaving the call next to the return.
  // Check that this is the case now.
  Instruction *AccRecInstr = nullptr;
  BasicBlock::iterator BBI(CI);
  for (++BBI; &*BBI != Ret; ++BBI) {
    if (canMoveAboveCall(&*BBI, CI, AA))
      continue;

    // If we can't move the instruction above the call, it might be because it
    // is an associative and commutative operation that could be transformed
    // using accumulator recursion elimination. Check to see if this is the
    // case, and if so, remember which instruction accumulates for later.
    if (AccPN || !canTransformAccumulatorRecursion(&*BBI, CI))
      return false; // We cannot eliminate the tail recursion!

    // Yes, this is accumulator recursion. Remember which instruction
    // accumulates.
    AccRecInstr = &*BBI;
  }

  BasicBlock *BB = Ret->getParent();

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "tailcall-recursion", CI)
           << "transforming tail recursion into loop";
  });

  // OK! We can transform this tail call. If this is the first one found,
  // create the new entry block, allowing us to branch back to the old entry.
  if (!HeaderBB)
    createTailRecurseLoopHeader(CI);

  if (RemovableCallsMustBeMarkedTail && !CI->isTailCall())
    CI->setTailCall();

  // Ok, now that we know we have a pseudo-entry block WITH all of the
  // required PHI nodes, add entries into the PHI node for the actual
  // parameters passed into the tail-recursive call.
  for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
    ArgumentPHIs[i]->addIncoming(CI->getArgOperand(i), BB);

  if (AccRecInstr) {
    insertAccumulator(AccRecInstr);

    // Rewrite the accumulator recursion instruction so that it does not use
    // the result of the call anymore, instead, use the PHI node we just
    // inserted.
    AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
  }

  // Update our return value tracking.
  if (RetPN) {
    if (Ret->getReturnValue() == CI || AccRecInstr) {
      // Defer selecting a return value.
      RetPN->addIncoming(RetPN, BB);
      RetKnownPN->addIncoming(RetKnownPN, BB);
    } else {
      // We found a return value we want to use, insert a select instruction to
      // select it if we don't already know what our return value will be and
      // store the result in our return value PHI node.
      SelectInst *SI = SelectInst::Create(
          RetKnownPN, RetPN, Ret->getReturnValue(), "current.ret.tr", Ret);
      RetSelects.push_back(SI);

      RetPN->addIncoming(SI, BB);
      RetKnownPN->addIncoming(ConstantInt::getTrue(RetKnownPN->getType()), BB);
    }

    if (AccPN)
      AccPN->addIncoming(AccRecInstr ? AccRecInstr : AccPN, BB);
  }

  // Now that all of the PHI nodes are in place, remove the call and
  // ret instructions, replacing them with an unconditional branch.
  BranchInst *NewBI = BranchInst::Create(HeaderBB, Ret);
  NewBI->setDebugLoc(CI->getDebugLoc());

  BB->getInstList().erase(Ret); // Remove return.
  BB->getInstList().erase(CI);  // Remove call.
  DTU.applyUpdates({{DominatorTree::Insert, BB, HeaderBB}});
  ++NumEliminated;
  return true;
}

bool TailRecursionEliminator::foldReturnAndProcessPred(
    ReturnInst *Ret, bool CannotTailCallElimCallsMarkedTail) {
  BasicBlock *BB = Ret->getParent();

  bool Change = false;

  // Make sure this block is a trivial return block.
  assert(BB->getFirstNonPHIOrDbg() == Ret &&
         "Trying to fold non-trivial return block");

  // If the return block contains nothing but the return and PHI's,
  // there might be an opportunity to duplicate the return in its
  // predecessors and perform TRE there. Look for predecessors that end
  // in unconditional branch and recursive call(s).
  SmallVector<BranchInst *, 8> UncondBranchPreds;
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    Instruction *PTI = Pred->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(PTI))
      if (BI->isUnconditional())
        UncondBranchPreds.push_back(BI);
  }

  while (!UncondBranchPreds.empty()) {
    BranchInst *BI = UncondBranchPreds.pop_back_val();
    BasicBlock *Pred = BI->getParent();
    if (CallInst *CI =
            findTRECandidate(BI, CannotTailCallElimCallsMarkedTail)) {
      LLVM_DEBUG(dbgs() << "FOLDING: " << *BB
                        << "INTO UNCOND BRANCH PRED: " << *Pred);
      FoldReturnIntoUncondBranch(Ret, BB, Pred, &DTU);

      // Cleanup: if all predecessors of BB have been eliminated by
      // FoldReturnIntoUncondBranch, delete it. It is important to empty it,
      // because the ret instruction in there is still using a value which
      // eliminateCall will attempt to remove.
      if (!BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
        DTU.deleteBB(BB);

      eliminateCall(CI);
      ++NumRetDuped;
      Change = true;
    }
  }

  return Change;
}
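
// For example (a sketch): given
//
//   pred:
//     %r = call i32 @f(i32 %n)
//     br label %retblock
//   retblock:
//     %p = phi i32 [ %r, %pred ], ...
//     ret i32 %p
//
// the return is duplicated into 'pred' as 'ret i32 %r', making the recursive
// call a TRE candidate there; NumRetDuped counts these duplications.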

bool TailRecursionEliminator::processReturningBlock(
    ReturnInst *Ret, bool CannotTailCallElimCallsMarkedTail) {
  CallInst *CI = findTRECandidate(Ret, CannotTailCallElimCallsMarkedTail);
  if (!CI)
    return false;

  return eliminateCall(CI);
}

void TailRecursionEliminator::cleanupAndFinalize() {
  // If we eliminated any tail recursions, it's possible that we inserted some
  // silly PHI nodes which just merge an initial value (the incoming operand)
  // with themselves. Check to see if we did and clean up our mess if so. This
  // occurs when a function passes an argument straight through to its tail
  // call.
  for (PHINode *PN : ArgumentPHIs) {
    // If the PHI Node is a dynamic constant, replace it with the value it is.
    if (Value *PNV = SimplifyInstruction(PN, F.getParent()->getDataLayout())) {
      PN->replaceAllUsesWith(PNV);
      PN->eraseFromParent();
    }
  }

  if (RetPN) {
    if (RetSelects.empty()) {
      // If we didn't insert any select instructions, then we know we didn't
      // store a return value and we can remove the PHI nodes we inserted.
      RetPN->dropAllReferences();
      RetPN->eraseFromParent();

      RetKnownPN->dropAllReferences();
      RetKnownPN->eraseFromParent();

      if (AccPN) {
        // We need to insert a copy of our accumulator instruction before any
        // return in the function, and return its result instead.
        Instruction *AccRecInstr = AccumulatorRecursionInstr;
        for (BasicBlock &BB : F) {
          ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
          if (!RI)
            continue;

          Instruction *AccRecInstrNew = AccRecInstr->clone();
          AccRecInstrNew->setName("accumulator.ret.tr");
          AccRecInstrNew->setOperand(AccRecInstr->getOperand(0) == AccPN,
                                     RI->getOperand(0));
          AccRecInstrNew->insertBefore(RI);
          RI->setOperand(0, AccRecInstrNew);
        }
      }
    } else {
      // We need to insert a select instruction before any return left in the
      // function to select our stored return value if we have one.
      for (BasicBlock &BB : F) {
        ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
        if (!RI)
          continue;

        SelectInst *SI = SelectInst::Create(
            RetKnownPN, RetPN, RI->getOperand(0), "current.ret.tr", RI);
        RetSelects.push_back(SI);
        RI->setOperand(0, SI);
      }

      if (AccPN) {
        // We need to insert a copy of our accumulator instruction before any
        // of the selects we inserted, and select its result instead.
        Instruction *AccRecInstr = AccumulatorRecursionInstr;
        for (SelectInst *SI : RetSelects) {
          Instruction *AccRecInstrNew = AccRecInstr->clone();
          AccRecInstrNew->setName("accumulator.ret.tr");
          AccRecInstrNew->setOperand(AccRecInstr->getOperand(0) == AccPN,
                                     SI->getFalseValue());
          AccRecInstrNew->insertBefore(SI);
          SI->setFalseValue(AccRecInstrNew);
        }
      }
    }
  }
}

bool TailRecursionEliminator::eliminate(Function &F,
                                        const TargetTransformInfo *TTI,
                                        AliasAnalysis *AA,
                                        OptimizationRemarkEmitter *ORE,
                                        DomTreeUpdater &DTU) {
  if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  bool MadeChange = false;
  bool AllCallsAreTailCalls = false;
  MadeChange |= markTails(F, AllCallsAreTailCalls, ORE);
  if (!AllCallsAreTailCalls)
    return MadeChange;

  // If this function is a varargs function, we won't be able to PHI the args
  // right, so don't even try to convert it...
  if (F.getFunctionType()->isVarArg())
    return MadeChange;

  // If false, we cannot perform TRE on tail calls marked with the 'tail'
  // attribute, because doing so would cause the stack size to increase (real
  // tail calls would deallocate variable sized allocas; TRE doesn't).
  bool CanTRETailMarkedCall = canTRE(F);

  TailRecursionEliminator TRE(F, TTI, AA, ORE, DTU);

  // Change any tail recursive calls to loops.
  //
  // FIXME: The code generator produces really bad code when an 'escaping
  // alloca' is changed from being a static alloca to being a dynamic alloca.
  // Until this is resolved, disable this transformation if that would ever
  // happen. This bug is PR962.
  for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; /*in loop*/) {
    BasicBlock *BB = &*BBI++; // foldReturnAndProcessPred may delete BB.
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator())) {
      bool Change = TRE.processReturningBlock(Ret, !CanTRETailMarkedCall);
      if (!Change && BB->getFirstNonPHIOrDbg() == Ret)
        Change = TRE.foldReturnAndProcessPred(Ret, !CanTRETailMarkedCall);
      MadeChange |= Change;
    }
  }

  TRE.cleanupAndFinalize();

  return MadeChange;
}

namespace {
struct TailCallElim : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  TailCallElim() : FunctionPass(ID) {
    initializeTailCallElimPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<PostDominatorTreeWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
    auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
    auto *PDTWP = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>();
    auto *PDT = PDTWP ? &PDTWP->getPostDomTree() : nullptr;
    // There is no noticeable performance difference here between Lazy and
    // Eager UpdateStrategy based on some test results. It is feasible to
    // switch the UpdateStrategy to Lazy if we find it profitable later.
    DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);

    return TailRecursionEliminator::eliminate(
        F, &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
        &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(), DTU);
  }
};
} // namespace

char TailCallElim::ID = 0;
INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim", "Tail Call Elimination",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(TailCallElim, "tailcallelim", "Tail Call Elimination",
                    false, false)

// Public interface to the TailCallElimination pass
FunctionPass *llvm::createTailCallEliminationPass() {
  return new TailCallElim();
}
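
// For reference (not part of the pass itself), the transformation can be
// exercised directly with opt:
//   opt -tailcallelim -S in.ll           ; legacy pass manager
//   opt -passes=tailcallelim -S in.ll    ; new pass manager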

PreservedAnalyses TailCallElimPass::run(Function &F,
                                        FunctionAnalysisManager &AM) {
  TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  auto *DT = AM.getCachedResult<DominatorTreeAnalysis>(F);
  auto *PDT = AM.getCachedResult<PostDominatorTreeAnalysis>(F);
  // There is no noticeable performance difference here between Lazy and Eager
  // UpdateStrategy based on some test results. It is feasible to switch the
  // UpdateStrategy to Lazy if we find it profitable later.
  DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);
  bool Changed = TailRecursionEliminator::eliminate(F, &TTI, &AA, &ORE, DTU);

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<GlobalsAA>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<PostDominatorTreeAnalysis>();
  return PA;
}