//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace PatternMatch;
/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users())
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", &*IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT.dominates(Ret, &*BIP));

  rememberInstruction(Ret);
  return Ret;
}
static BasicBlock::iterator findInsertPointAfter(Instruction *I,
                                                 BasicBlock *MustDominate) {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  return IP;
}
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = findInsertPointAfter(I, Builder.GetInsertBlock());
  return ReuseOrCreateCast(I, Ty, Op, IP);
}
/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  rememberInstruction(BO);

  return BO;
}
/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
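///
/// For example, factoring S = {8,+,12} by Factor = 4 rewrites S to {2,+,3}
/// with a zero Remainder, while factoring the constant 10 by 4 yields 2 and
/// adds the leftover 2 to Remainder.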
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder = SE.getAddExpr(
            Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    const SCEVConstant *FC = cast<SCEVConstant>(Factor);
    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
      if (!C->getAPInt().srem(FC->getAPInt())) {
        SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
        NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
        S = SE.getMulExpr(NewMulOps);
        return true;
      }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}
/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}
/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}
/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
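///
/// For example, a byte offset of the form (4*%i + 4) relative to an i32*
/// base is emitted here as
///   getelementptr i32, i32* %base, i64 (%i + 1)
/// rather than as ptrtoint, mul/add, and inttoptr instructions.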
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *OriginalElTy = PTy->getElementType();
  Type *ElTy = OriginalElTy;
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = DL.getIntPtrType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (const SCEV *Op : Ops) {
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Op);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
      if (Ops.empty())
        break;
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
        if (SE.getTypeSizeInBits(C->getType()) <= 64) {
          const StructLayout &SL = *DL.getStructLayout(STy);
          uint64_t FullOffset = C->getValue()->getZExtValue();
          if (FullOffset < SL.getSizeInBytes()) {
            unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
            GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
            ElTy = STy->getTypeAtIndex(ElIdx);
            Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
            AnyNonZeroIndices = true;
            FoundFieldNo = true;
          }
        }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  {
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V)) break;

      bool AnyIndexNotLoopInvariant = any_of(
          GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });

      if (AnyIndexNotLoopInvariant)
        break;

      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
    // because ScalarEvolution may have changed the address arithmetic to
    // compute a value which is beyond the end of the allocated object.
    Value *Casted = V;
    if (V->getType() != PTy)
      Casted = InsertNoopCastOfTo(Casted, PTy);
    Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices, "scevgep");
    Ops.push_back(SE.getUnknown(GEP));
    rememberInstruction(GEP);
  }

  return expand(SE.getAddExpr(Ops));
}
/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}
/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not instructions, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  auto I = OpsAndLoops.begin();

  // Expand the calculation of X pow N in the following manner:
  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
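  // For example, N = 11 = 8 + 2 + 1, so X pow 11 is built from the repeated
  // squares of X as (X pow 8) * (X pow 2) * (X pow 1).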
  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
    auto E = I;
    // Calculate how many times the same operand from the same loop is included
    // into this power.
    uint64_t Exponent = 0;
    const uint64_t MaxExponent = UINT64_MAX >> 1;
    // No one sane will ever try to calculate such huge exponents, but if we
    // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
    // below when the power of 2 exceeds our Exponent, and we want it to be
    // 1u << 31 at most to not deal with unsigned overflow.
    while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
      ++Exponent;
      ++E;
    }
    assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");

    // Calculate powers with exponents 1, 2, 4, 8 etc. and include those of them
    // that are needed into the result.
    Value *P = expandCodeFor(I->second, Ty);
    Value *Result = nullptr;
    if (Exponent & 1)
      Result = P;
    for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
      P = InsertBinop(Instruction::Mul, P, P);
      if (Exponent & BinExp)
        Result = Result ? InsertBinop(Instruction::Mul, Result, P) : P;
    }

    I = E;
    assert(Result && "Nothing was expanded?");
    return Result;
  };

  while (I != OpsAndLoops.end()) {
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = ExpandOpBinPowN();
    } else if (I->second->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = ExpandOpBinPowN();
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()));
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W);
      }
    }
  }

  return Prod;
}
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}
/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}
/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
           OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}
/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP. If the pattern isn't recognized, return NULL.
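///
/// For example, for the increment %iv.next = add i64 %iv, %step this returns
/// the instruction defining %iv (the recurrence PHI), provided %step's
/// definition dominates InsertPos.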
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (auto I = IncV->op_begin() + 1, E = IncV->op_end(); I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}
/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'. This is intended to be used when the instruction
/// 'I' is being moved. If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}
/// hoistStep - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
    fixupInsertPoints(*I);
    (*I)->moveBefore(InsertPos);
  }
  return true;
}
/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}
/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
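///
/// For a pointer-typed PHI the increment is emitted as a getelementptr off
/// the PHI (falling back to an i1* GEP when the step is not a constant),
/// while integer PHIs get a plain add or sub named "<IVName>.iv.next".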
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}
/// \brief Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                                  Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    fixupInsertPoints(InstToHoist);
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}
/// \brief Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
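///
/// For example, if the loop already has a PHI for {0,+,2} and the requested
/// expression is {10,+,-2}, the requested value equals 10 - PHI, so the
/// existing PHI can be reused with an inverted step.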
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncate it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}
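// An increment %iv.next = %iv + %step may be tagged nsw only when sign
// extending the sum gives the same result as summing the sign extensions in
// a type twice as wide; the two SCEV expressions compared below are equal
// exactly in that case.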
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}
/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (auto &I : *L->getHeader()) {
      auto *PN = dyn_cast<PHINode>(&I);
      if (!PN || !SE.isSCEVable(PN->getType()))
        continue;

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      Instruction *TempIncV =
          cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop, we might find an exact match
        // SCEV later.
        AddRecPhiMatch = PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value into the loop preheader.
  assert(L->getLoopPreheader() &&
         "Can't expand add recurrences without a loop preheader!");
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getLoopPreheader()->getTerminator());

  // StartV must have been inserted into L's preheader to dominate the new
  // phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition. It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}
Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(normalizeForPostIncUse(S, Loops, SE));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    if (!Start->isZero()) {
      // The normalization below assumes that Start is constant zero, so if
      // it isn't re-associate Start to PostLoopOffset.
      assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
      PostLoopOffset = Start;
      Start = SE.getConstant(Normalized->getType(), 0);
    }
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(
                             Start, Step, Normalized->getLoop(),
                             Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // We can't use a pointer type for the addrec if the pointer type is
  // non-integral.
  Type *AddRecPHIExpandTy =
      DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;

  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
                                          IntTy, TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT.dominates(cast<Instruction>(Result),
                         &*Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside whose operand is replaced during
      // expansion with the value of the postinc user. Without fundamentally
      // changing the way postinc users are tracked, the only remedy is
      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
      // but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        SCEVInsertPointGuard Guard(Builder, this);
        StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType()) {
      Result = Builder.CreateTrunc(Result, TruncTy);
      rememberInstruction(Result);
    }
    // Invert the result.
    if (InvertStep) {
      Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
                                 Result);
      rememberInstruction(Result);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      if (Result->getType()->isIntegerTy()) {
        Value *Base = expandCodeFor(PostLoopOffset, ExpandTy);
        const SCEV *const OffsetArray[1] = {SE.getUnknown(Result)};
        Result = expandAddToGEP(OffsetArray, OffsetArray + 1, PTy, IntTy, Base);
      } else {
        const SCEV *const OffsetArray[1] = {PostLoopOffset};
        Result =
            expandAddToGEP(OffsetArray, OffsetArray + 1, PTy, IntTy, Result);
      }
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}
Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = nullptr;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is more narrow.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       S->getNoWrapFlags(SCEV::FlagNW)));
    BasicBlock::iterator NewInsertPt =
        findInsertPointAfter(cast<Instruction>(V), Builder.GetInsertBlock());
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
                      &*NewInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
                                        S->getNoWrapFlags(SCEV::FlagNW));

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    //
    // The LHS and RHS values are factored out of the expand call to make the
    // output independent of the argument evaluation order.
    const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
    const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
    return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  &Header->front());
    rememberInstruction(CanonicalIV);

    SmallSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP).second) {
        // There must be an incoming value for each predecessor, even the
        // duplicates!
        CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
        continue;
      }

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}
Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}
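// smax/umax expressions are lowered to a chain of compares and selects over
// the operands; e.g. (roughly) smax(%a, %b) becomes
//   %cond = icmp sgt i64 %a, %b
//   %smax = select i1 %cond, i64 %a, i64 %b
// When integer and pointer operands are mixed, the comparisons are done in
// the effective integer type and the result is cast back at the end.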
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}
Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}
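// expandCodeFor materializes SH as IR at the current insertion point (or at
// IP for the three-argument form). When a destination type is supplied it
// must have the same bit width as SH's type; only a no-op cast is inserted.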
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  Builder.SetInsertPoint(IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}
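/// Try to reuse a value that ScalarEvolution has already recorded for S in
/// its ExprValueMap. The returned {Value, Offset} pair names an instruction
/// of the right type that dominates InsertPt, lives in the same function, and
/// whose enclosing loop (if any) contains InsertPt; expand() compensates for
/// a non-null Offset when rebuilding S. Returns {nullptr, nullptr} when no
/// suitable value exists or when the expansion must be literal.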
ScalarEvolution::ValueOffsetPair
SCEVExpander::FindValueInExprValueMap(const SCEV *S,
                                      const Instruction *InsertPt) {
  SetVector<ScalarEvolution::ValueOffsetPair> *Set = SE.getSCEVValues(S);
  // If the expansion is not in CanonicalMode, and the SCEV contains any
  // scAddRecExpr subexpression, the SCEV must be expanded literally.
  if (CanonicalMode || !SE.containsAddRecurrence(S)) {
    // If S is scConstant, it may be worse to reuse an existing Value.
    if (S->getSCEVType() != scConstant && Set) {
      // Choose a Value from the set which dominates the InsertPt.
      // InsertPt should be inside the Value's parent loop so as not to break
      // LCSSA form.
      for (auto const &VOPair : *Set) {
        Value *V = VOPair.first;
        ConstantInt *Offset = VOPair.second;
        Instruction *EntInst = nullptr;
        if (V && isa<Instruction>(V) && (EntInst = cast<Instruction>(V)) &&
            S->getType() == V->getType() &&
            EntInst->getFunction() == InsertPt->getFunction() &&
            SE.DT.dominates(EntInst, InsertPt) &&
            (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
             SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
          return {V, Offset};
      }
    }
  }
  return {nullptr, nullptr};
}
// The expansion of a SCEV either reuses a previous Value from ExprValueMap or
// expands the SCEV literally. Specifically, if the expansion is in LSRMode and
// the SCEV contains any scAddRecExpr subexpression, it is expanded literally,
// to prevent LSR's transformed SCEV from being reverted. Otherwise the
// expansion first tries to reuse a Value from ExprValueMap, and only when that
// fails does it expand the SCEV literally.
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = &*Builder.GetInsertPoint();
  for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = &*L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = &*L->getHeader()->getFirstInsertionPt();
      while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
             (isInsertedInstruction(InsertPt) ||
              isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = &*std::next(InsertPt->getIterator());
      }
      break;
    }

  // Check to see if we already expanded this here.
  auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  SCEVInsertPointGuard Guard(Builder, this);
  Builder.SetInsertPoint(InsertPt);

  // Expand the expression into instructions.
  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
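  // If a cached value was found together with a constant offset, S is rebuilt
  // from it below: pointer-typed values get a GEP with a negated index
  // (element-sized when the offset is a multiple of the element size,
  // otherwise byte-wise through an i8* bitcast), integers get a plain sub.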
  Value *V = VO.first;

  if (!V)
    V = visit(S);
  else if (VO.second) {
    if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
      Type *Ety = Vty->getPointerElementType();
      int64_t Offset = VO.second->getSExtValue();
      int64_t ESize = SE.getTypeSizeInBits(Ety);
      if ((Offset * 8) % ESize == 0) {
        ConstantInt *Idx =
            ConstantInt::getSigned(VO.second->getType(), -(Offset * 8) / ESize);
        V = Builder.CreateGEP(Ety, V, Idx, "scevgep");
      } else {
        ConstantInt *Idx =
            ConstantInt::getSigned(VO.second->getType(), -Offset);
        unsigned AS = Vty->getAddressSpace();
        V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
        V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
                              "uglygep");
        V = Builder.CreateBitCast(V, Vty);
      }
    } else {
      V = Builder.CreateSub(V, VO.second);
    }
  }
  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non-postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
  return V;
}
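// Instructions created by the expander are recorded so that
// isInsertedInstruction() can tell expander-generated IR apart from
// pre-existing IR; values created while expanding post-increment expressions
// are tracked in a separate set.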
void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  SCEVInsertPointGuard Guard(Builder, this);
  PHINode *V =
      cast<PHINode>(expandCodeFor(H, nullptr, &L->getHeader()->front()));

  return V;
}
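// Illustrative use (a sketch, not code from this file): a pass holding a
// SCEVExpander `Exp` could request an i64 canonical IV for a loop `L` with
//   PHINode *IV =
//       Exp.getOrInsertCanonicalInductionVariable(L, Type::getInt64Ty(Ctx));
// where `Ctx` is the function's LLVMContext; an existing {0,+,1} PHI of a
// suitable type is reused instead of inserting a new one.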
/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned
SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                  SmallVectorImpl<WeakTrackingVH> &DeadInsts,
                                  const TargetTransformInfo *TTI) {
  // Find integer phis in order of increasing width.
  SmallVector<PHINode*, 8> Phis;
  for (auto &I : *L->getHeader()) {
    if (auto *PN = dyn_cast<PHINode>(&I))
      Phis.push_back(PN);
    else
      break;
  }

  if (TTI)
    std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
      // Put pointers at the back and make sure pointer < pointer = false.
      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
        return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
      return RHS->getType()->getPrimitiveSizeInBits() <
             LHS->getType()->getPrimitiveSizeInBits();
    });

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow. Map wide phis to their truncation
  // so narrow phis can reuse them.
  for (PHINode *Phi : Phis) {
    auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
      if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
        return V;
      if (!SE.isSCEVable(PN->getType()))
        return nullptr;
      auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
      if (!Const)
        return nullptr;
      return Const->getValue();
    };

    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = SimplifyPHINode(Phi)) {
      if (V->getType() != Phi->getType())
        continue;
      Phi->replaceAllUsesWith(V);
      DeadInsts.emplace_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI &&
          TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc = dyn_cast<Instruction>(
          OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
          dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      if (OrigInc && IsomorphicInc) {
        // If this phi has the same width but is more canonical, replace the
        // original with it. As part of the "more canonical" determination,
        // respect a prior decision to use an IV chain.
        if (OrigPhiRef->getType() == Phi->getType() &&
            !(ChainedPhis.count(Phi) ||
              isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
            (ChainedPhis.count(Phi) ||
             isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
          std::swap(OrigPhiRef, Phi);
          std::swap(OrigInc, IsomorphicInc);
        }
        // Replacing the congruent phi is sufficient because acyclic
        // redundancy elimination, CSE/GVN, should handle the
        // rest. However, once SCEV proves that a phi is congruent,
        // it's often the head of an IV user cycle that is isomorphic
        // with the original phi. It's worth eagerly cleaning up the
        // common case of a single IV increment so that DeleteDeadPHIs
        // can remove cycles that had postinc uses.
        const SCEV *TruncExpr =
          SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
        if (OrigInc != IsomorphicInc &&
            TruncExpr == SE.getSCEV(IsomorphicInc) &&
            SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
            hoistIVInc(OrigInc, IsomorphicInc)) {
          DEBUG_WITH_TYPE(DebugType,
                          dbgs() << "INDVARS: Eliminated congruent iv.inc: "
                                 << *IsomorphicInc << '\n');
          Value *NewInc = OrigInc;
          if (OrigInc->getType() != IsomorphicInc->getType()) {
            Instruction *IP = nullptr;
            if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
              IP = &*PN->getParent()->getFirstInsertionPt();
            else
              IP = OrigInc->getNextNode();

            IRBuilder<> Builder(IP);
            Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
            NewInc = Builder.CreateTruncOrBitCast(
                OrigInc, IsomorphicInc->getType(), IVName);
          }
          IsomorphicInc->replaceAllUsesWith(NewInc);
          DeadInsts.emplace_back(IsomorphicInc);
        }
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs() << "INDVARS: Eliminated congruent iv: "
                                      << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.emplace_back(Phi);
  }
  return NumElim;
}
Value *SCEVExpander::getExactExistingExpansion(const SCEV *S,
                                               const Instruction *At, Loop *L) {
  Optional<ScalarEvolution::ValueOffsetPair> VO =
      getRelatedExistingExpansion(S, At, L);
  if (VO && VO.getValue().second == nullptr)
    return VO.getValue().first;
  return nullptr;
}
Optional<ScalarEvolution::ValueOffsetPair>
SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
                                          Loop *L) {
  using namespace llvm::PatternMatch;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Look for a suitable value in simple conditions at the loop exits.
  for (BasicBlock *BB : ExitingBlocks) {
    ICmpInst::Predicate Pred;
    Instruction *LHS, *RHS;
    BasicBlock *TrueBB, *FalseBB;

    if (!match(BB->getTerminator(),
               m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
                    TrueBB, FalseBB)))
      continue;

    if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
      return ScalarEvolution::ValueOffsetPair(LHS, nullptr);

    if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
      return ScalarEvolution::ValueOffsetPair(RHS, nullptr);
  }

  // Use expand's logic which is used for reusing a previous Value in
  // ExprValueMap.
  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
  if (VO.first)
    return VO;

  // There is potential to make this significantly smarter, but this simple
  // heuristic already gets some interesting cases.

  // Cannot find a suitable value.
  return None;
}
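// Recursive helper for isHighCostExpansion. The Processed set prevents shared
// subexpressions from being visited repeatedly; the interesting cases below
// are UDiv and Max expressions, which usually come from trip-count
// computation rather than from user code.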
bool SCEVExpander::isHighCostExpansionHelper(
    const SCEV *S, Loop *L, const Instruction *At,
    SmallPtrSetImpl<const SCEV *> &Processed) {

  // If we can find an existing value for this scev available at the point "At"
  // then consider the expression cheap.
  if (At && getRelatedExistingExpansion(S, At, L))
    return false;

  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansionHelper(cast<SCEVTruncateExpr>(S)->getOperand(),
                                     L, At, Processed);
  case scZeroExtend:
    return isHighCostExpansionHelper(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                                     L, At, Processed);
  case scSignExtend:
    return isHighCostExpansionHelper(cast<SCEVSignExtendExpr>(S)->getOperand(),
                                     L, At, Processed);
  }

  if (!Processed.insert(S).second)
    return false;

  if (auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
    // If the divisor is a power of two and the SCEV type fits in a native
    // integer, consider the division cheap irrespective of whether it occurs in
    // the user code since it can be lowered into a right shift.
    if (auto *SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
      if (SC->getAPInt().isPowerOf2()) {
        const DataLayout &DL =
            L->getHeader()->getParent()->getParent()->getDataLayout();
        unsigned Width = cast<IntegerType>(UDivExpr->getType())->getBitWidth();
        return DL.isIllegalInteger(Width);
      }

    // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
    // HowManyLessThans produced to compute a precise expression, rather than a
    // UDiv from the user's code. If we can't find a UDiv in the code with some
    // simple searching, assume the former and consider UDivExpr expensive to
    // compute.
    BasicBlock *ExitingBB = L->getExitingBlock();
    if (!ExitingBB)
      return true;

    // At the beginning of this function we already tried to find an existing
    // value for plain 'S'. Now try to look up 'S + 1' since it is a common
    // pattern involving division. This is just a simple search heuristic.
    if (!At)
      At = &ExitingBB->back();
    if (!getRelatedExistingExpansion(
            SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), At, L))
      return true;
  }

  // HowManyLessThans uses a Max expression whenever the loop is not guarded by
  // the exit condition.
  if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
    return true;

  // Recurse past nary expressions, which commonly occur in the
  // BackedgeTakenCount. They may already exist in program code, and if not,
  // they are not too expensive to rematerialize.
  if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
    for (auto *Op : NAry->operands())
      if (isHighCostExpansionHelper(Op, L, At, Processed))
        return true;
  }

  // If we haven't recognized an expensive SCEV pattern, assume it's an
  // expression produced by program code.
  return false;
}
Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
                                            Instruction *IP) {
  switch (Pred->getKind()) {
  case SCEVPredicate::P_Union:
    return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
  case SCEVPredicate::P_Equal:
    return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
  case SCEVPredicate::P_Wrap: {
    auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
    return expandWrapPredicate(AddRecPred, IP);
  }
  }
  llvm_unreachable("Unknown SCEV predicate type");
}
Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
                                          Instruction *IP) {
  Value *Expr0 = expandCodeFor(Pred->getLHS(), Pred->getLHS()->getType(), IP);
  Value *Expr1 = expandCodeFor(Pred->getRHS(), Pred->getRHS()->getType(), IP);

  Builder.SetInsertPoint(IP);
  auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
  return I;
}
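// The runtime test emitted below is, roughly:
//   {%mul, %ovf} = umul.with.overflow(|Step|, TripCount)
//   %wrap = Step < 0 ? (Start - %mul > Start) : (Start + %mul < Start)
//   %wrap |= %ovf   (plus, if the trip count had to be truncated, a check
//                    that no bits were dropped)
// i.e. the returned value is true when the no-wrap assumption may not hold.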
Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
                                           Instruction *Loc, bool Signed) {
  assert(AR->isAffine() && "Cannot generate RT check for "
                           "non-affine expression");

  SCEVUnionPredicate Pred;
  const SCEV *ExitCount =
      SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);

  assert(ExitCount != SE.getCouldNotCompute() && "Invalid loop count");

  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *Start = AR->getStart();

  unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
  unsigned DstBits = SE.getTypeSizeInBits(AR->getType());

  // The expression {Start,+,Step} has nusw/nssw if
  //   Step < 0,  Start - |Step| * Backedge <= Start
  //   Step >= 0, Start + |Step| * Backedge > Start
  // and |Step| * Backedge doesn't unsigned overflow.

  IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
  Builder.SetInsertPoint(Loc);
  Value *TripCountVal = expandCodeFor(ExitCount, CountTy, Loc);

  IntegerType *Ty =
      IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(AR->getType()));

  Value *StepValue = expandCodeFor(Step, Ty, Loc);
  Value *NegStepValue = expandCodeFor(SE.getNegativeSCEV(Step), Ty, Loc);
  Value *StartValue = expandCodeFor(Start, Ty, Loc);

  ConstantInt *Zero =
      ConstantInt::get(Loc->getContext(), APInt::getNullValue(DstBits));

  Builder.SetInsertPoint(Loc);
  // Compute |Step|.
  Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
  Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);

  // Get the backedge taken count and truncate or extend it to the AR type.
  Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
  auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
                                         Intrinsic::umul_with_overflow, Ty);

  // Compute |Step| * Backedge
  CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
  Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
  Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");

  // Compute:
  //   Start + |Step| * Backedge < Start
  //   Start - |Step| * Backedge > Start
  Value *Add = Builder.CreateAdd(StartValue, MulV);
  Value *Sub = Builder.CreateSub(StartValue, MulV);

  Value *EndCompareGT = Builder.CreateICmp(
      Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);

  Value *EndCompareLT = Builder.CreateICmp(
      Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);

  // Select the answer based on the sign of Step.
  Value *EndCheck =
      Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);

  // If the backedge taken count type is larger than the AR type,
  // check that we don't drop any bits by truncating it. If we are
  // dropping bits, then we have overflow (unless the step is zero).
  if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
    auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
    auto *BackedgeCheck =
        Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
                           ConstantInt::get(Loc->getContext(), MaxVal));
    BackedgeCheck = Builder.CreateAnd(
        BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));

    EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
  }

  EndCheck = Builder.CreateOr(EndCheck, OfMul);
  return EndCheck;
}
Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
                                         Instruction *IP) {
  const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
  Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;

  // Add a check for NUSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
    NUSWCheck = generateOverflowCheck(A, IP, false);

  // Add a check for NSSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
    NSSWCheck = generateOverflowCheck(A, IP, true);

  if (NUSWCheck && NSSWCheck)
    return Builder.CreateOr(NUSWCheck, NSSWCheck);

  if (NUSWCheck)
    return NUSWCheck;

  if (NSSWCheck)
    return NSSWCheck;

  return ConstantInt::getFalse(IP->getContext());
}
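// Each expanded predicate evaluates to true at run time when the corresponding
// assumption is violated, so a union is just the OR of its members and an
// empty union expands to 'false' (never violated).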
Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
                                          Instruction *IP) {
  auto *BoolType = IntegerType::get(IP->getContext(), 1);
  Value *Check = ConstantInt::getNullValue(BoolType);

  // Loop over all checks in this set.
  for (auto Pred : Union->getPredicates()) {
    auto *NextCheck = expandCodeForPredicate(Pred, IP);
    Builder.SetInsertPoint(IP);
    Check = Builder.CreateOr(Check, NextCheck);
  }

  return Check;
}
namespace {
// Search for a SCEV subexpression that is not safe to expand. Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check is
// only needed when the expression includes some subexpression that is not IV
// derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
//
// We cannot generally expand recurrences unless the step dominates the loop
// header. The expander handles the special case of affine recurrences by
// scaling the recurrence outside the loop, but this technique isn't generally
// applicable. Expanding a nested recurrence outside a loop requires computing
// binomial coefficients. This could be done, but the recurrence has to be in a
// perfectly reduced form, which can't be guaranteed.
struct SCEVFindUnsafe {
  ScalarEvolution &SE;
  bool IsUnsafe;

  SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}

  bool follow(const SCEV *S) {
    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
      if (!SC || SC->getValue()->isZero()) {
        IsUnsafe = true;
        return false;
      }
    }
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
      const SCEV *Step = AR->getStepRecurrence(SE);
      if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
        IsUnsafe = true;
        return false;
      }
    }
    return true;
  }
  bool isDone() const { return IsUnsafe; }
};
}

namespace llvm {
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
  SCEVFindUnsafe Search(SE);
  visitAll(S, Search);
  return !Search.IsUnsafe;
}
}