//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
#define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
#else
#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
#endif

using namespace llvm;

cl::opt<unsigned> llvm::SCEVCheapExpansionBudget(
    "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
    cl::desc("When performing SCEV expansion only if it is cheap to do, this "
             "controls the budget that is considered cheap (default = 4)"));

using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one (= dominating IP) exists, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Value *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users()) {
    if (U->getType() != Ty)
      continue;
    CastInst *CI = dyn_cast<CastInst>(U);
    if (!CI || CI->getOpcode() != Op)
      continue;

    // Found a suitable cast that is at IP or comes before IP. Use it. Note that
    // the cast must also properly dominate the Builder's insertion point.
    if (IP->getParent() == CI->getParent() && &*BIP != CI &&
        (&*IP == CI || CI->comesBefore(&*IP))) {
      Ret = CI;
      break;
    }
  }

  // Create a new cast.
  if (!Ret) {
    SCEVInsertPointGuard Guard(Builder, this);
    Builder.SetInsertPoint(&*IP);
    Ret = Builder.CreateCast(Op, V, Ty, V->getName());
  }

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(!isa<Instruction>(Ret) ||
         SE.DT.dominates(cast<Instruction>(Ret), &*BIP));

  return Ret;
}

BasicBlock::iterator
SCEVExpander::findInsertPointAfter(Instruction *I,
                                   Instruction *MustDominate) const {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getParent()->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  // Adjust insert point to be after instructions inserted by the expander, so
  // we can re-use already inserted instructions. Avoid skipping past the
  // original \p MustDominate, in case it is an inserted instruction.
  while (isInsertedInstruction(&*IP) && &*IP != MustDominate)
    ++IP;

  return IP;
}

BasicBlock::iterator
SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {
  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return IP;
  }

  // Cast the instruction immediately after the instruction.
  if (Instruction *I = dyn_cast<Instruction>(V))
    return findInsertPointAfter(I, &*Builder.GetInsertPoint());

  // Otherwise, this must be some kind of a constant,
  // so let's plop this cast into the function's entry block.
  assert(isa<Constant>(V) &&
         "Expected the cast argument to be a global/constant");
  return Builder.GetInsertBlock()
      ->getParent()
      ->getEntryBlock()
      .getFirstInsertionPt();
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // inttoptr only works for integral pointers. For non-integral pointers, we
  // can create a GEP on i8* null with the integral value as index. Note that
  // it is safe to use GEP of null instead of inttoptr here, because only
  // expressions already based on a GEP of null should be converted to pointers
  // during expansion.
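  // As an illustrative sketch (not normative), converting an i64 %x to a
  // non-integral pointer type in address space N produces IR along the lines
  // of:
  //   %uglygep = getelementptr i8, i8 addrspace(N)* null, i64 %x
  //   %cast = bitcast i8 addrspace(N)* %uglygep to <destination type>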
  if (Op == Instruction::IntToPtr) {
    auto *PtrTy = cast<PointerType>(Ty);
    if (DL.isNonIntegralPointerType(PtrTy)) {
      auto *Int8PtrTy = Builder.getInt8PtrTy(PtrTy->getAddressSpace());
      assert(DL.getTypeAllocSize(Builder.getInt8Ty()) == 1 &&
             "alloc size of i8 must be 1 byte for the GEP to be correct");
      auto *GEP = Builder.CreateGEP(
          Builder.getInt8Ty(), Constant::getNullValue(Int8PtrTy), V, "uglygep");
      return Builder.CreateBitCast(GEP, Ty);
    }
  }
  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
              SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
              SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Try to reuse existing cast, or insert one.
  return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation, and hoisting
/// to an outer loop when the opportunity is there and it is safe.
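/// (Illustrative summary) If an identical binop with compatible no-wrap and
/// exact flags already exists within a few instructions of the insertion
/// point, that instruction is reused instead of emitting a new one.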
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS,
                                 SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;

      auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
        // Ensure that no-wrap flags match.
        if (isa<OverflowingBinaryOperator>(I)) {
          if (I->hasNoSignedWrap() != (Flags & SCEV::FlagNSW))
            return true;
          if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
            return true;
        }
        // Conservatively, do not use any instruction which has any exact
        // flags installed.
        if (isa<PossiblyExactOperator>(I) && I->isExact())
          return true;
        return false;
      };
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  if (IsSafeToHoist) {
    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  if (Flags & SCEV::FlagNUW)
    BO->setHasNoUnsignedWrap();
  if (Flags & SCEV::FlagNSW)
    BO->setHasNoSignedWrap();

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
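/// Illustrative examples: with Factor = 4, S = {8,+,12} becomes {2,+,3} with
/// no remainder, while S = 10 becomes 2 and a remainder of 2 is added to
/// Remainder.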
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder = SE.getAddExpr(
            Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor))
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getAPInt().srem(FC->getAPInt())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->operands());
          NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
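///
/// As an illustrative sketch, an address computation like 'p + 4*i' with an
/// 'i32* %p' base is emitted as
///   %scevgep = getelementptr i32, i32* %p, i64 %i
/// rather than as the equivalent ptrtoint/add/inttoptr sequence, which alias
/// analysis would have to treat conservatively.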
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntIdxTy = DL.getIndexType(PTy);

  // For opaque pointers, always generate i8 GEP.
  if (!PTy->isOpaque()) {
    // Descend down the pointer's type and attempt to convert the other
    // operands into GEP indices, at each level. The first index in a GEP
    // indexes into the array implied by the pointer operand; the rest of
    // the indices index into the element or field type selected by the
    // preceding index.
    Type *ElTy = PTy->getNonOpaquePointerElementType();
    for (;;) {
      // If the scale size is not 0, attempt to factor out a scale for
      // array indexing.
      SmallVector<const SCEV *, 8> ScaledOps;
      if (ElTy->isSized()) {
        const SCEV *ElSize = SE.getSizeOfExpr(IntIdxTy, ElTy);
        if (!ElSize->isZero()) {
          SmallVector<const SCEV *, 8> NewOps;
          for (const SCEV *Op : Ops) {
            const SCEV *Remainder = SE.getConstant(Ty, 0);
            if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
              // Op now has ElSize factored out.
              ScaledOps.push_back(Op);
              if (!Remainder->isZero())
                NewOps.push_back(Remainder);
              AnyNonZeroIndices = true;
            } else {
              // The operand was not divisible, so add it to the list of
              // operands we'll scan next iteration.
              NewOps.push_back(Op);
            }
          }
          // If we made any changes, update Ops.
          if (!ScaledOps.empty()) {
            Ops = NewOps;
            SimplifyAddOperands(Ops, Ty, SE);
          }
        }
      }

      // Record the scaled array index for this level of the type. If
      // we didn't find any operands that could be factored, tentatively
      // assume that element zero was selected (since the zero offset
      // would obviously be folded away).
      Value *Scaled =
          ScaledOps.empty()
              ? Constant::getNullValue(Ty)
              : expandCodeForImpl(SE.getAddExpr(ScaledOps), Ty, false);
      GepIndices.push_back(Scaled);

      // Collect struct field index operands.
      while (StructType *STy = dyn_cast<StructType>(ElTy)) {
        bool FoundFieldNo = false;
        // An empty struct has no fields.
        if (STy->getNumElements() == 0) break;
        // Field offsets are known. See if a constant offset falls within any of
        // the struct fields.
        if (Ops.empty())
          break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *DL.getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                  SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
        // If no struct field offsets were found, tentatively assume that
        // field zero was selected (since the zero offset would obviously
        // be folded away).
        if (!FoundFieldNo) {
          ElTy = STy->getTypeAtIndex(0u);
          GepIndices.push_back(
            Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
        }
      }

      if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
        ElTy = ATy->getElementType();
      else
        // FIXME: Handle VectorType.
        // E.g., if ElTy is a scalable vector, then ElSize is not a
        // compile-time constant and cannot be factored out. The generated IR
        // is less ideal: the base 'V' is cast to i8* and an ugly getelementptr
        // is emitted over that.
        break;
    }
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    if (!PTy->isOpaque())
      V = InsertNoopCastOfTo(V,
          Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeForImpl(SE.getAddExpr(Ops), Ty, false);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
    return Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
  }

  {
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V)) break;

      bool AnyIndexNotLoopInvariant = any_of(
          GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });

      if (AnyIndexNotLoopInvariant)
        break;

      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
    // because ScalarEvolution may have changed the address arithmetic to
    // compute a value which is beyond the end of the allocated object.
    Value *Casted = V;
    if (V->getType() != PTy)
      Casted = InsertNoopCastOfTo(Casted, PTy);
    Value *GEP = Builder.CreateGEP(PTy->getNonOpaquePointerElementType(),
                                   Casted, GepIndices, "scevgep");
    Ops.push_back(SE.getUnknown(GEP));
  }

  return expand(SE.getAddExpr(Ops));
}

Value *SCEVExpander::expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty,
                                    Value *V) {
  const SCEV *const Ops[1] = {Op};
  return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

} // end anonymous namespace

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (const SCEV *Op : reverse(S->operands()))
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
      continue;
    }

    assert(!Op->getType()->isPointerTy() && "Only first op can be pointer");
    if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not an instruction, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeForImpl(SE.getNegativeSCEV(Op), Ty, false);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
                        /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeForImpl(Op, Ty, false);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
                        /*IsSafeToHoist*/ true);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (const SCEV *Op : reverse(S->operands()))
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  auto I = OpsAndLoops.begin();

  // Expand the calculation of X pow N in the following manner:
  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
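  // For example, 13 = 8 + 4 + 1, so X pow 13 = (X pow 8) * (X pow 4) * X,
  // which takes 5 multiplies instead of the 12 a naive expansion would use.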
  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
    auto E = I;
    // Calculate how many times the same operand from the same loop is included
    // in this multiplication.
    uint64_t Exponent = 0;
    const uint64_t MaxExponent = UINT64_MAX >> 1;
    // No one sane will ever try to calculate such huge exponents, but if we
    // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
    // below when the power of 2 exceeds our Exponent, and we want it to be
    // 1u << 31 at most to not deal with unsigned overflow.
    while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
      ++Exponent;
      ++E;
    }
    assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");

    // Calculate powers with exponents 1, 2, 4, 8, etc., and multiply those
    // that are needed into the result.
    Value *P = expandCodeForImpl(I->second, Ty, false);
    Value *Result = nullptr;
    if (Exponent & 1)
      Result = P;
    for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
      P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
                      /*IsSafeToHoist*/ true);
      if (Exponent & BinExp)
        Result = Result ? InsertBinop(Instruction::Mul, Result, P,
                                      SCEV::FlagAnyWrap,
                                      /*IsSafeToHoist*/ true)
                        : P;
    }

    I = E;
    assert(Result && "Nothing was expanded?");
    return Result;
  };

  while (I != OpsAndLoops.end()) {
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = ExpandOpBinPowN();
    } else if (I->second->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple mul.
      Value *W = ExpandOpBinPowN();
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        auto NWFlags = S->getNoWrapFlags();
        // Clear the nsw flag if the shl would produce a poison value.
        if (RHS->logBase2() == RHS->getBitWidth() - 1)
          NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
                           /*IsSafeToHoist*/ true);
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
                           /*IsSafeToHoist*/ true);
      }
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeForImpl(S->getLHS(), Ty, false);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()),
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
  }

  Value *RHS = expandCodeForImpl(S->getRHS(), Ty, false);
  return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
                     /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (Use &Op : llvm::drop_begin(IncV->operands()))
      if (Instruction *OInst = dyn_cast<Instruction>(Op))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP. If the pattern isn't recognized, return NULL.
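/// For example (illustrative): given the increment '%iv.next = add %iv, %step'
/// where %step dominates InsertPos, this returns %iv, assuming %iv is defined
/// by an instruction (typically the IV phi itself).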
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Use &U : llvm::drop_begin(IncV->operands())) {
      if (isa<Constant>(U))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(U)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'. This is intended to be used when the instruction
/// 'I' is being moved. If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (Instruction *I : llvm::reverse(IVIncs)) {
    fixupInsertPoints(I);
    I->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for (Instruction *IVOper = IncV;
       (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                 /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
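/// For an integer IV this emits a plain add or sub named "<IVName>.iv.next"
/// (illustrative); for a pointer IV it emits a GEP so the pointer arithmetic
/// stays analyzable.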
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    IncV = expandAddToGEP(SE.getSCEV(StepV), GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType())
      IncV = Builder.CreateBitCast(IncV, PN->getType());
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
  }
  return IncV;
}

/// Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
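/// For example (illustrative): an i32 {0,+,1} can be recovered from an
/// available i64 {0,+,1} phi by truncation, and {R,+,-1} can be recovered
/// from {0,+,1} as R - {0,+,1} by setting InvertStep.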
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  // We can't transform to match a pointer PHI.
  if (Phi->getType()->isPointerTy())
    return false;

  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncating it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getMinusSCEV(Requested->getStart(), Requested) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

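/// (Descriptive summary, added for clarity) Whether the increment of the
/// recurrence can be marked nsw: true iff sign-extending the incremented
/// value commutes with incrementing the sign-extended operands in a type of
/// twice the bit width, i.e. sext(AR + Step) == sext(AR) + sext(Step).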
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
      SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
      SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (PHINode &PN : L->getHeader()->phis()) {
      if (!SE.isSCEVable(PN.getType()))
        continue;

      // We should not look for an incomplete PHI. Getting SCEV for an
      // incomplete PHI has no meaning at all.
      if (!PN.isComplete()) {
        SCEV_DEBUG_WITH_TYPE(
            DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
        continue;
      }

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      // TODO: this possibly can be reworked to avoid this cast at all.
      Instruction *TempIncV =
          dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
      if (!TempIncV)
        continue;

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = &PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop, we might find an exact match
        // later.
        AddRecPhiMatch = &PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      // Those values were not actually inserted but re-used.
      ReusedValues.insert(AddRecPhiMatch);
      ReusedValues.insert(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value into the loop preheader.
  assert(L->getLoopPreheader() &&
         "Can't expand add recurrences without a loop preheader!");
  Value *StartV =
      expandCodeForImpl(Normalized->getStart(), ExpandTy,
                        L->getLoopPreheader()->getTerminator(), false);

  // StartV must have been inserted into L's preheader to dominate the new
  // phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeForImpl(
      Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition. It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode. LSR SCEV-based salvaging is most
  // effective when we are able to use an IV inserted here, so record it.
  InsertedValues.insert(PN);
  InsertedIVs.push_back(PN);
  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(normalizeForPostIncUse(S, Loops, SE));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
        SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                         Normalized->getLoop(),
                         Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    if (!Start->isZero()) {
      // The normalization below assumes that Start is constant zero, so if
      // it isn't, re-associate Start to PostLoopOffset.
      assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
      PostLoopOffset = Start;
      Start = SE.getConstant(Normalized->getType(), 0);
    }
    Normalized =
        cast<SCEVAddRecExpr>(SE.getAddRecExpr(
            Start, Step, Normalized->getLoop(),
            Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // We can't use a pointer type for the addrec if the pointer type is
  // non-integral.
  Type *AddRecPHIExpandTy =
      DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;

  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
                                          IntTy, TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // We might be introducing a new use of the post-inc IV that is not poison
    // safe, in which case we should drop poison generating flags. Only keep
    // those flags for which SCEV has proven that they always hold.
    if (isa<OverflowingBinaryOperator>(Result)) {
      auto *I = cast<Instruction>(Result);
      if (!S->hasNoUnsignedWrap())
        I->setHasNoUnsignedWrap(false);
      if (!S->hasNoSignedWrap())
        I->setHasNoSignedWrap(false);
    }

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT.dominates(cast<Instruction>(Result),
                         &*Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside whose operand is replaced during
      // expansion with the value of the postinc user. Without fundamentally
      // changing the way postinc users are tracked, the only remedy is
      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
      // but hopefully expandCodeFor handles that.
      bool useSubtract =
          !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        SCEVInsertPointGuard Guard(Builder, this);
        StepV = expandCodeForImpl(
            Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType())
      Result = Builder.CreateTrunc(Result, TruncTy);

    // Invert the result.
    if (InvertStep)
      Result = Builder.CreateSub(
          expandCodeForImpl(Normalized->getStart(), TruncTy, false), Result);
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeForImpl(PostLoopScale, IntTy, false));
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      if (Result->getType()->isIntegerTy()) {
        Value *Base = expandCodeForImpl(PostLoopOffset, ExpandTy, false);
        Result = expandAddToGEP(SE.getUnknown(Result), PTy, IntTy, Base);
      } else {
        Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
      }
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(
          Result, expandCodeForImpl(PostLoopOffset, IntTy, false));
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  // In canonical mode we compute the addrec as an expression of a canonical IV
  // using evaluateAtIteration and expand the resulting SCEV expression. This
  // way we avoid introducing new IVs to carry on the computation of the addrec
  // throughout the loop.
  //
  // For nested addrecs evaluateAtIteration might need a canonical IV of a
  // type wider than the addrec itself. Emitting a canonical IV of the
  // proper type might produce non-legal types, for example expanding an i64
  // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this just fall
  // back to non-canonical mode for nested addrecs.
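  //
  // As an illustration, in canonical mode an affine addrec {a,+,b} over a
  // canonical IV %indvar expands (via evaluateAtIteration) to roughly
  // a + b * %indvar, instead of introducing a dedicated phi for it.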
  if (!CanonicalMode || (S->getNumOperands() > 2))
    return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = nullptr;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is more narrow.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty) &&
      !S->getType()->isPointerTy()) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       S->getNoWrapFlags(SCEV::FlagNW)));
    BasicBlock::iterator NewInsertPt =
        findInsertPointAfter(cast<Instruction>(V), &*Builder.GetInsertPoint());
    V = expandCodeForImpl(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
                          &*NewInsertPt, false);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    if (PointerType *PTy = dyn_cast<PointerType>(S->getType())) {
      Value *StartV = expand(SE.getPointerBase(S));
      assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
      return expandAddToGEP(SE.removePointerBase(S), PTy, Ty, StartV);
    }

    SmallVector<const SCEV *, 4> NewOps(S->operands());
    NewOps[0] = SE.getConstant(Ty, 0);
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
                                        S->getNoWrapFlags(SCEV::FlagNW));

    // Just do a normal add. Pre-expand the operands to suppress folding.
    //
    // The LHS and RHS values are factored out of the expand call to make the
    // output independent of the argument evaluation order.
    const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
    const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
    return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
  }


  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  &Header->front());
    rememberInstruction(CanonicalIV);

    SmallSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP).second) {
        // There must be an incoming value for each predecessor, even the
        // duplicates!
        CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
        continue;
      }

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }
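
  // A sketch of the IR the block above produces (assuming an i32 canonical
  // IV, one out-of-loop predecessor %preheader and one back-edge from
  // %latch):
  //   header:
  //     %indvar = phi i32 [ 0, %preheader ], [ %indvar.next, %latch ]
  //   latch:
  //     %indvar.next = add i32 %indvar, 1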

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}
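
// For reference, the closed form used above: evaluateAtIteration expands an
// addrec {A,+,B,+,C} at iteration i to
//   A*BC(i,0) + B*BC(i,1) + C*BC(i,2)
// where BC(i, k) is the binomial coefficient "i choose k".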

Value *SCEVExpander::visitPtrToIntExpr(const SCEVPtrToIntExpr *S) {
  Value *V =
      expandCodeForImpl(S->getOperand(), S->getOperand()->getType(), false);
  return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
                           GetOptimalInsertionPointForCastOf(V));
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeForImpl(
      S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
      false);
  return Builder.CreateTrunc(V, Ty);
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeForImpl(
      S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
      false);
  return Builder.CreateZExt(V, Ty);
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeForImpl(
      S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
      false);
  return Builder.CreateSExt(V, Ty);
}

Value *SCEVExpander::expandSMaxExpr(const SCEVNAryExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    Type *OpTy = S->getOperand(i)->getType();
    if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
    Value *Sel;
    if (Ty->isIntegerTy())
      Sel = Builder.CreateIntrinsic(Intrinsic::smax, {Ty}, {LHS, RHS},
                                    /*FMFSource=*/nullptr, "smax");
    else {
      Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
      Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    }
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}
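
// For integer operands the reduction above emits the min/max intrinsics; a
// two-operand i32 smax, for example, becomes (illustrative value names):
//   %smax = call i32 @llvm.smax.i32(i32 %a, i32 %b)
// Mixed pointer/integer operands fall back to the icmp+select form instead.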

Value *SCEVExpander::expandUMaxExpr(const SCEVNAryExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    Type *OpTy = S->getOperand(i)->getType();
    if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
    Value *Sel;
    if (Ty->isIntegerTy())
      Sel = Builder.CreateIntrinsic(Intrinsic::umax, {Ty}, {LHS, RHS},
                                    /*FMFSource=*/nullptr, "umax");
    else {
      Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
      Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    }
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandSMinExpr(const SCEVNAryExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands() - 2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    Type *OpTy = S->getOperand(i)->getType();
    if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
    Value *Sel;
    if (Ty->isIntegerTy())
      Sel = Builder.CreateIntrinsic(Intrinsic::smin, {Ty}, {LHS, RHS},
                                    /*FMFSource=*/nullptr, "smin");
    else {
      Value *ICmp = Builder.CreateICmpSLT(LHS, RHS);
      Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smin");
    }
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandUMinExpr(const SCEVNAryExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands() - 2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    Type *OpTy = S->getOperand(i)->getType();
    if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
    Value *Sel;
    if (Ty->isIntegerTy())
      Sel = Builder.CreateIntrinsic(Intrinsic::umin, {Ty}, {LHS, RHS},
                                    /*FMFSource=*/nullptr, "umin");
    else {
      Value *ICmp = Builder.CreateICmpULT(LHS, RHS);
      Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umin");
    }
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  return expandSMaxExpr(S);
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  return expandUMaxExpr(S);
}

Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
  return expandSMinExpr(S);
}

Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
  return expandUMinExpr(S);
}

Value *SCEVExpander::visitSequentialUMinExpr(const SCEVSequentialUMinExpr *S) {
  SmallVector<Value *> Ops;
  for (const SCEV *Op : S->operands())
    Ops.emplace_back(expand(Op));

  Value *SaturationPoint =
      MinMaxIntrinsic::getSaturationPoint(Intrinsic::umin, S->getType());

  SmallVector<Value *> OpIsZero;
  for (Value *Op : ArrayRef<Value *>(Ops).drop_back())
    OpIsZero.emplace_back(Builder.CreateICmpEQ(Op, SaturationPoint));

  Value *AnyOpIsZero = Builder.CreateLogicalOr(OpIsZero);

  Value *NaiveUMin = expandUMinExpr(S);
  return Builder.CreateSelect(AnyOpIsZero, SaturationPoint, NaiveUMin);
}
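
// Illustrative two-operand expansion over i64 (hypothetical value names):
// only operands before the last are compared against the saturation point,
// so poison in a later operand is blocked once an earlier operand is zero:
//   %a.is.zero = icmp eq i64 %a, 0
//   %naive = call i64 @llvm.umin.i64(i64 %a, i64 %b)
//   %res = select i1 %a.is.zero, i64 0, i64 %naive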

Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty,
                                       Instruction *IP, bool Root) {
  setInsertPoint(IP);
  Value *V = expandCodeForImpl(SH, Ty, Root);
  return V;
}

Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty, bool Root) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);

  if (PreserveLCSSA) {
    if (auto *Inst = dyn_cast<Instruction>(V)) {
      // Create a temporary instruction at the current insertion point, so we
      // can hand it off to the helper to create LCSSA PHIs if required for the
      // new use.
      //
      // FIXME: Ideally formLCSSAForInstructions (used in fixupLCSSAFormFor)
      // would accept an insertion point and return an LCSSA phi for that
      // insertion point, so there is no need to insert & remove the temporary
      // instruction.
      Instruction *Tmp;
      if (Inst->getType()->isIntegerTy())
        Tmp = cast<Instruction>(Builder.CreateIntToPtr(
            Inst, Inst->getType()->getPointerTo(), "tmp.lcssa.user"));
      else {
        assert(Inst->getType()->isPointerTy());
        Tmp = cast<Instruction>(Builder.CreatePtrToInt(
            Inst, Type::getInt32Ty(Inst->getContext()), "tmp.lcssa.user"));
      }
      V = fixupLCSSAFormFor(Tmp, 0);

      // Clean up temporary instruction.
      InsertedValues.erase(Tmp);
      InsertedPostIncValues.erase(Tmp);
      Tmp->eraseFromParent();
    }
  }

  InsertedExpressions[std::make_pair(SH, &*Builder.GetInsertPoint())] = V;
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

ScalarEvolution::ValueOffsetPair
SCEVExpander::FindValueInExprValueMap(const SCEV *S,
                                      const Instruction *InsertPt) {
  auto *Set = SE.getSCEVValues(S);
  // If the expansion is not in CanonicalMode, and the SCEV contains any
  // sub scAddRecExpr type SCEV, it is required to expand the SCEV literally.
  if (CanonicalMode || !SE.containsAddRecurrence(S)) {
    // If S is scConstant, it may be worse to reuse an existing Value.
    if (S->getSCEVType() != scConstant && Set) {
      // Choose a Value from the set which dominates the InsertPt.
      // InsertPt should be inside the Value's parent loop so as not to break
      // LCSSA form.
      for (auto const &VOPair : *Set) {
        Value *V = VOPair.first;
        ConstantInt *Offset = VOPair.second;
        Instruction *EntInst = dyn_cast_or_null<Instruction>(V);
        if (!EntInst)
          continue;

        assert(EntInst->getFunction() == InsertPt->getFunction());
        if (S->getType() == V->getType() &&
            SE.DT.dominates(EntInst, InsertPt) &&
            (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
             SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
          return {V, Offset};
      }
    }
  }
  return {nullptr, nullptr};
}
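
// A note on the pair returned above, as read from the reuse path in expand():
// an entry (V, Offset) for S means SCEV(V) == S + Offset, so the caller
// materializes S as V - Offset (for pointers, a GEP by -Offset).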

// The expansion of SCEV will either reuse a previous Value in ExprValueMap,
// or expand the SCEV literally. Specifically, if the expansion is in LSRMode,
// and the SCEV contains any sub scAddRecExpr type SCEV, it will be expanded
// literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
// the expansion will try to reuse a Value from ExprValueMap, and only when
// that fails, expand the SCEV literally.
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = &*Builder.GetInsertPoint();

  // We can move the insertion point only if there are no div or rem
  // operations; otherwise we risk moving it past the check for a zero
  // denominator.
  auto SafeToHoist = [](const SCEV *S) {
    return !SCEVExprContains(S, [](const SCEV *S) {
              if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
                if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
                  // Division by non-zero constants can be hoisted.
                  return SC->getValue()->isZero();
                // All other divisions should not be moved as they may be
                // divisions by zero and should be kept within the
                // conditions of the surrounding loops that guard their
                // execution (see PR35406).
                return true;
              }
              return false;
            });
  };
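
  // A concrete sketch of the hazard (hypothetical IR): if the loop body is
  // only reached under a "%d != 0" guard, hoisting
  //   %q = udiv i64 %n, %d
  // into the preheader executes the division unconditionally, and udiv by
  // zero is immediate undefined behavior. Division by a non-zero constant
  // carries no such guard, so it is safe to hoist.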

  if (SafeToHoist(S)) {
    for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
         L = L->getParentLoop()) {
      if (SE.isLoopInvariant(S, L)) {
        if (!L) break;
        if (BasicBlock *Preheader = L->getLoopPreheader())
          InsertPt = Preheader->getTerminator();
        else
          // LSR sets the insertion point for AddRec start/step values to the
          // block start to simplify value reuse, even though it's an invalid
          // position. SCEVExpander must correct for this in all cases.
          InsertPt = &*L->getHeader()->getFirstInsertionPt();
      } else {
        // If the SCEV is computable at this level, insert it into the header
        // after the PHIs (and after any other instructions that we've inserted
        // there) so that it is guaranteed to dominate any user inside the loop.
        if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
          InsertPt = &*L->getHeader()->getFirstInsertionPt();

        while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
               (isInsertedInstruction(InsertPt) ||
                isa<DbgInfoIntrinsic>(InsertPt))) {
          InsertPt = &*std::next(InsertPt->getIterator());
        }
        break;
      }
    }
  }

  // Check to see if we already expanded this here.
  auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  SCEVInsertPointGuard Guard(Builder, this);
  Builder.SetInsertPoint(InsertPt);

  // Expand the expression into instructions.
  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
  Value *V = VO.first;
  if (!V)
    V = visit(S);
  else {
    // If we're reusing an existing instruction, we are effectively CSEing two
    // copies of the instruction (with potentially different flags). As such,
    // we need to drop any poison generating flags unless we can prove that
    // said flags must be valid for all new users.
    if (auto *I = dyn_cast<Instruction>(V))
      if (I->hasPoisonGeneratingFlags() && !programUndefinedIfPoison(I))
        I->dropPoisonGeneratingFlags();

    if (VO.second) {
      if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
        int64_t Offset = VO.second->getSExtValue();
        ConstantInt *Idx =
            ConstantInt::getSigned(VO.second->getType(), -Offset);
        unsigned AS = Vty->getAddressSpace();
        V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
        V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
                              "uglygep");
        V = Builder.CreateBitCast(V, Vty);
      } else {
        V = Builder.CreateSub(V, VO.second);
      }
    }
  }
  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non-postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  auto DoInsert = [this](Value *V) {
    if (!PostIncLoops.empty())
      InsertedPostIncValues.insert(V);
    else
      InsertedValues.insert(V);
  };
  DoInsert(I);

  if (!PreserveLCSSA)
    return;

  if (auto *Inst = dyn_cast<Instruction>(I)) {
    // A new instruction has been added, which might introduce new uses outside
    // a defining loop. Fix LCSSA form for each operand of the new instruction,
    // if required.
    for (unsigned OpIdx = 0, OpEnd = Inst->getNumOperands(); OpIdx != OpEnd;
         OpIdx++)
      fixupLCSSAFormFor(Inst, OpIdx);
  }
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned
SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                  SmallVectorImpl<WeakTrackingVH> &DeadInsts,
                                  const TargetTransformInfo *TTI) {
  // Find integer phis in order of increasing width.
  SmallVector<PHINode*, 8> Phis;
  for (PHINode &PN : L->getHeader()->phis())
    Phis.push_back(&PN);

  if (TTI)
    // Use stable_sort to preserve order of equivalent PHIs, so the order
    // of the sorted Phis is the same from run to run on the same loop.
    llvm::stable_sort(Phis, [](Value *LHS, Value *RHS) {
      // Put pointers at the back and make sure pointer < pointer = false.
      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
        return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
      return RHS->getType()->getPrimitiveSizeInBits().getFixedSize() <
             LHS->getType()->getPrimitiveSizeInBits().getFixedSize();
    });

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow. Map wide phis to their truncation
  // so narrow phis can reuse them.
  for (PHINode *Phi : Phis) {
    auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
      if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
        return V;
      if (!SE.isSCEVable(PN->getType()))
        return nullptr;
      auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
      if (!Const)
        return nullptr;
      return Const->getValue();
    };

    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = SimplifyPHINode(Phi)) {
      if (V->getType() != Phi->getType())
        continue;
      Phi->replaceAllUsesWith(V);
      DeadInsts.emplace_back(Phi);
      ++NumElim;
      SCEV_DEBUG_WITH_TYPE(DebugType,
                           dbgs() << "INDVARS: Eliminated constant iv: " << *Phi
                                  << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI &&
          TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
            SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc = dyn_cast<Instruction>(
          OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
          dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      if (OrigInc && IsomorphicInc) {
        // If this phi has the same width but is more canonical, replace the
        // original with it. As part of the "more canonical" determination,
        // respect a prior decision to use an IV chain.
        if (OrigPhiRef->getType() == Phi->getType() &&
            !(ChainedPhis.count(Phi) ||
              isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
            (ChainedPhis.count(Phi) ||
             isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
          std::swap(OrigPhiRef, Phi);
          std::swap(OrigInc, IsomorphicInc);
        }
        // Replacing the congruent phi is sufficient because acyclic
        // redundancy elimination, CSE/GVN, should handle the
        // rest. However, once SCEV proves that a phi is congruent,
        // it's often the head of an IV user cycle that is isomorphic
        // with the original phi. It's worth eagerly cleaning up the
        // common case of a single IV increment so that DeleteDeadPHIs
        // can remove cycles that had postinc uses.
        const SCEV *TruncExpr =
            SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
        if (OrigInc != IsomorphicInc &&
            TruncExpr == SE.getSCEV(IsomorphicInc) &&
            SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
            hoistIVInc(OrigInc, IsomorphicInc)) {
          SCEV_DEBUG_WITH_TYPE(
              DebugType, dbgs() << "INDVARS: Eliminated congruent iv.inc: "
                                << *IsomorphicInc << '\n');
          Value *NewInc = OrigInc;
          if (OrigInc->getType() != IsomorphicInc->getType()) {
            Instruction *IP = nullptr;
            if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
              IP = &*PN->getParent()->getFirstInsertionPt();
            else
              IP = OrigInc->getNextNode();

            IRBuilder<> Builder(IP);
            Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
            NewInc = Builder.CreateTruncOrBitCast(
                OrigInc, IsomorphicInc->getType(), IVName);
          }
          IsomorphicInc->replaceAllUsesWith(NewInc);
          DeadInsts.emplace_back(IsomorphicInc);
        }
      }
    }
    SCEV_DEBUG_WITH_TYPE(DebugType,
                         dbgs() << "INDVARS: Eliminated congruent iv: " << *Phi
                                << '\n');
    SCEV_DEBUG_WITH_TYPE(
        DebugType, dbgs() << "INDVARS: Original iv: " << *OrigPhiRef << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.emplace_back(Phi);
  }

  return NumElim;
}
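
// Example of the congruence cleaned up above (hypothetical IR): two header
// phis that are both {0,+,1} at different widths,
//   %iv    = phi i64 [ 0, %ph ], [ %iv.next, %latch ]
//   %iv.32 = phi i32 [ 0, %ph ], [ %iv.32.next, %latch ]
// collapse to the wide one, with users of %iv.32 rewritten through a
// "trunc i64 %iv to i32" inserted at the header.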

Optional<ScalarEvolution::ValueOffsetPair>
SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
                                          Loop *L) {
  using namespace llvm::PatternMatch;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Look for suitable value in simple conditions at the loop exits.
  for (BasicBlock *BB : ExitingBlocks) {
    ICmpInst::Predicate Pred;
    Instruction *LHS, *RHS;

    if (!match(BB->getTerminator(),
               m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
                    m_BasicBlock(), m_BasicBlock())))
      continue;

    if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
      return ScalarEvolution::ValueOffsetPair(LHS, nullptr);

    if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
      return ScalarEvolution::ValueOffsetPair(RHS, nullptr);
  }

  // Use expand's logic which is used for reusing a previous Value in
  // ExprValueMap. Note that we don't currently model the cost of
  // needing to drop poison generating flags on the instruction if we
  // want to reuse it. We effectively assume that has zero cost.
  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
  if (VO.first)
    return VO;

  // There is potential to make this significantly smarter, but this simple
  // heuristic already gets some interesting cases.

  // Cannot find a suitable value.
  return None;
}

template<typename T> static InstructionCost costAndCollectOperands(
  const SCEVOperand &WorkItem, const TargetTransformInfo &TTI,
  TargetTransformInfo::TargetCostKind CostKind,
  SmallVectorImpl<SCEVOperand> &Worklist) {

  const T *S = cast<T>(WorkItem.S);
  InstructionCost Cost = 0;
  // Object to help map SCEV operands to expanded IR instructions.
  struct OperationIndices {
    OperationIndices(unsigned Opc, size_t min, size_t max) :
        Opcode(Opc), MinIdx(min), MaxIdx(max) { }
    unsigned Opcode;
    size_t MinIdx;
    size_t MaxIdx;
  };

  // Collect the operations of all the instructions that will be needed to
  // expand the SCEVExpr. This is so that when we come to cost the operands,
  // we know what the generated user(s) will be.
  SmallVector<OperationIndices, 2> Operations;

  auto CastCost = [&](unsigned Opcode) -> InstructionCost {
    Operations.emplace_back(Opcode, 0, 0);
    return TTI.getCastInstrCost(Opcode, S->getType(),
                                S->getOperand(0)->getType(),
                                TTI::CastContextHint::None, CostKind);
  };

  auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
                       unsigned MinIdx = 0,
                       unsigned MaxIdx = 1) -> InstructionCost {
    Operations.emplace_back(Opcode, MinIdx, MaxIdx);
    return NumRequired *
           TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
  };

  auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired, unsigned MinIdx,
                        unsigned MaxIdx) -> InstructionCost {
    Operations.emplace_back(Opcode, MinIdx, MaxIdx);
    Type *OpType = S->getOperand(0)->getType();
    return NumRequired * TTI.getCmpSelInstrCost(
                             Opcode, OpType, CmpInst::makeCmpResultType(OpType),
                             CmpInst::BAD_ICMP_PREDICATE, CostKind);
  };

  switch (S->getSCEVType()) {
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  case scUnknown:
  case scConstant:
    return 0;
  case scPtrToInt:
    Cost = CastCost(Instruction::PtrToInt);
    break;
  case scTruncate:
    Cost = CastCost(Instruction::Trunc);
    break;
  case scZeroExtend:
    Cost = CastCost(Instruction::ZExt);
    break;
  case scSignExtend:
    Cost = CastCost(Instruction::SExt);
    break;
  case scUDivExpr: {
    unsigned Opcode = Instruction::UDiv;
    if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
      if (SC->getAPInt().isPowerOf2())
        Opcode = Instruction::LShr;
    Cost = ArithCost(Opcode, 1);
    break;
  }
  case scAddExpr:
    Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
    break;
  case scMulExpr:
    // TODO: this is a very pessimistic cost modelling for Mul,
    // because of Bin Pow algorithm actually used by the expander,
    // see SCEVExpander::visitMulExpr(), ExpandOpBinPowN().
    Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
    break;
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
  case scSequentialUMinExpr: {
    // FIXME: should this ask the cost for Intrinsic's?
    // The reduction tree.
    Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
    Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
    switch (S->getSCEVType()) {
    case scSequentialUMinExpr: {
      // The safety net against poison.
      // FIXME: this is broken.
      Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
      Cost += ArithCost(Instruction::Or,
                        S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
      Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
      break;
    }
    default:
      assert(!isa<SCEVSequentialMinMaxExpr>(S) &&
             "Unhandled SCEV expression type?");
      break;
    }
    break;
  }
  case scAddRecExpr: {
    // In this polynomial, we may have some zero operands, and we shouldn't
    // really charge for those. So how many non-zero coefficients are there?
    int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
                     return !Op->isZero();
                   });

    assert(NumTerms >= 1 && "Polynomial should have at least one term.");
    assert(!(*std::prev(S->operands().end()))->isZero() &&
           "Last operand should not be zero");

    // Ignoring the constant term (operand 0), how many of the coefficients
    // are u> 1?
    int NumNonZeroDegreeNonOneTerms =
        llvm::count_if(S->operands(), [](const SCEV *Op) {
          auto *SConst = dyn_cast<SCEVConstant>(Op);
          return !SConst || SConst->getAPInt().ugt(1);
        });

    // Much like with a normal add expr, the polynomial will require
    // one less addition than the number of its terms.
    InstructionCost AddCost = ArithCost(Instruction::Add, NumTerms - 1,
                                        /*MinIdx*/ 1, /*MaxIdx*/ 1);
    // Here, *each* one of those will require a multiplication.
    InstructionCost MulCost =
        ArithCost(Instruction::Mul, NumNonZeroDegreeNonOneTerms);
    Cost = AddCost + MulCost;

    // What is the degree of this polynomial?
    int PolyDegree = S->getNumOperands() - 1;
    assert(PolyDegree >= 1 && "Should be at least affine.");

    // The final term will be:
    //   Op_{PolyDegree} * x ^ {PolyDegree}
    // Where x ^ {PolyDegree} will again require PolyDegree-1 mul operations.
    // Note that x ^ {PolyDegree} = x * x ^ {PolyDegree-1} so charging for
    // x ^ {PolyDegree} will give us x ^ {2} .. x ^ {PolyDegree-1} for free.
    // FIXME: this is conservatively correct, but might be overly pessimistic.
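    //
    // A hedged worked example: for {0,+,2,+,3} (PolyDegree == 2) this charges
    // one Add (two non-zero terms), two Muls for the coefficients 2 and 3,
    // and then MulCost once more for the x^2 power chain, i.e. one Add and
    // four Muls in total.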
    Cost += MulCost * (PolyDegree - 1);
    break;
  }
  }

  for (auto &CostOp : Operations) {
    for (auto SCEVOp : enumerate(S->operands())) {
      // Clamp the index to account for multiple IR operations being chained.
      size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
      size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
      Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
    }
  }
  return Cost;
}

bool SCEVExpander::isHighCostExpansionHelper(
    const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
    InstructionCost &Cost, unsigned Budget, const TargetTransformInfo &TTI,
    SmallPtrSetImpl<const SCEV *> &Processed,
    SmallVectorImpl<SCEVOperand> &Worklist) {
  if (Cost > Budget)
    return true; // Already run out of budget, give up.

  const SCEV *S = WorkItem.S;
  // Was the cost of expansion of this expression already accounted for?
  if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
    return false; // We have already accounted for this expression.

  // If we can find an existing value for this scev available at the point "At"
  // then consider the expression cheap.
  if (getRelatedExistingExpansion(S, &At, L))
    return false; // Consider the expression to be free.

  TargetTransformInfo::TargetCostKind CostKind =
      L->getHeader()->getParent()->hasMinSize()
          ? TargetTransformInfo::TCK_CodeSize
          : TargetTransformInfo::TCK_RecipThroughput;

  switch (S->getSCEVType()) {
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  case scUnknown:
    // Assume to be zero-cost.
    return false;
  case scConstant: {
    // Only evaluate the costs of constants when optimizing for size.
    if (CostKind != TargetTransformInfo::TCK_CodeSize)
      return false;
    const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
    Type *Ty = S->getType();
    Cost += TTI.getIntImmCostInst(
        WorkItem.ParentOpcode, WorkItem.OperandIdx, Imm, Ty, CostKind);
    return Cost > Budget;
  }
  case scTruncate:
  case scPtrToInt:
  case scZeroExtend:
  case scSignExtend: {
    Cost +=
        costAndCollectOperands<SCEVCastExpr>(WorkItem, TTI, CostKind, Worklist);
    return false; // Will answer upon next entry into this function.
  }
  case scUDivExpr: {
    // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
    // HowManyLessThans produced to compute a precise expression, rather than a
    // UDiv from the user's code. If we can't find a UDiv in the code with some
    // simple searching, we need to account for its cost.

    // At the beginning of this function we already tried to find an existing
    // value for plain 'S'. Now try to lookup 'S + 1' since it is a common
    // pattern involving division. This is just a simple search heuristic.
    if (getRelatedExistingExpansion(
            SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
      return false; // Consider it to be free.

    Cost +=
        costAndCollectOperands<SCEVUDivExpr>(WorkItem, TTI, CostKind, Worklist);
    return false; // Will answer upon next entry into this function.
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
           "Nary expr should have more than 1 operand.");
    // The simple nary expr will require one less op (or pair of ops)
    // than the number of its terms.
    Cost +=
        costAndCollectOperands<SCEVNAryExpr>(WorkItem, TTI, CostKind, Worklist);
    return Cost > Budget;
  }
  case scAddRecExpr: {
    assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
           "Polynomial should be at least linear");
    Cost += costAndCollectOperands<SCEVAddRecExpr>(
        WorkItem, TTI, CostKind, Worklist);
    return Cost > Budget;
  }
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
                                            Instruction *IP) {
  switch (Pred->getKind()) {
  case SCEVPredicate::P_Union:
    return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
  case SCEVPredicate::P_Equal:
    return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
  case SCEVPredicate::P_Wrap: {
    auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
    return expandWrapPredicate(AddRecPred, IP);
  }
  }
  llvm_unreachable("Unknown SCEV predicate type");
}

Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
                                          Instruction *IP) {
  Value *Expr0 =
      expandCodeForImpl(Pred->getLHS(), Pred->getLHS()->getType(), IP, false);
  Value *Expr1 =
      expandCodeForImpl(Pred->getRHS(), Pred->getRHS()->getType(), IP, false);

  Builder.SetInsertPoint(IP);
  auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
  return I;
}

Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
                                           Instruction *Loc, bool Signed) {
  assert(AR->isAffine() && "Cannot generate RT check for "
                           "non-affine expression");

  SCEVUnionPredicate Pred;
  const SCEV *ExitCount =
      SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);

  assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");

  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *Start = AR->getStart();

  Type *ARTy = AR->getType();
  unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
  unsigned DstBits = SE.getTypeSizeInBits(ARTy);

  // The expression {Start,+,Step} has nusw/nssw if
  //   Step < 0, Start - |Step| * Backedge <= Start
  //   Step >= 0, Start + |Step| * Backedge > Start
  // and |Step| * Backedge doesn't unsigned overflow.
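  //
  // As an illustrative sketch (hypothetical value names), for the unsigned
  // case with a possibly-positive step the check built below amounts to:
  //   %mul = call { iN, i1 } @llvm.umul.with.overflow.iN(iN %abs.step, iN %btc)
  //   %end = add iN %start, %mul.result
  //   %cmp = icmp ult iN %end, %start
  //   %oob = or i1 %cmp, %mul.overflow
  // where %oob being true means the predicate does not hold.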

  IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
  Builder.SetInsertPoint(Loc);
  Value *TripCountVal = expandCodeForImpl(ExitCount, CountTy, Loc, false);

  IntegerType *Ty =
      IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));

  Value *StepValue = expandCodeForImpl(Step, Ty, Loc, false);
  Value *NegStepValue =
      expandCodeForImpl(SE.getNegativeSCEV(Step), Ty, Loc, false);
  Value *StartValue = expandCodeForImpl(Start, ARTy, Loc, false);

  ConstantInt *Zero =
      ConstantInt::get(Loc->getContext(), APInt::getZero(DstBits));

  Builder.SetInsertPoint(Loc);

  Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
  Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);

  // Compute |Step| * Backedge
  // Compute:
  //   1. Start + |Step| * Backedge < Start
  //   2. Start - |Step| * Backedge > Start
  //
  // And select either 1. or 2. depending on whether step is positive or
  // negative. If Step is known to be positive or negative, only create
  // either 1. or 2.
  auto ComputeEndCheck = [&]() -> Value * {
    // Checking <u 0 is always false.
    if (!Signed && Start->isZero() && SE.isKnownPositive(Step))
      return ConstantInt::getFalse(Loc->getContext());

    // Get the backedge taken count and truncate or extend it to the AR type.
    Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);

    Value *MulV, *OfMul;
    if (Step->isOne()) {
      // Special-case Step of one. Potentially-costly `umul_with_overflow` isn't
      // needed, there is never an overflow, so to avoid artificially inflating
      // the cost of the check, directly emit the optimized IR.
      MulV = TruncTripCount;
      OfMul = ConstantInt::getFalse(MulV->getContext());
    } else {
      auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
                                             Intrinsic::umul_with_overflow, Ty);
      CallInst *Mul =
          Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
      MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
      OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
    }

    Value *Add = nullptr, *Sub = nullptr;
    bool NeedPosCheck = !SE.isKnownNegative(Step);
    bool NeedNegCheck = !SE.isKnownPositive(Step);

    if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARTy)) {
      StartValue = InsertNoopCastOfTo(
          StartValue, Builder.getInt8PtrTy(ARPtrTy->getAddressSpace()));
      Value *NegMulV = Builder.CreateNeg(MulV);
      if (NeedPosCheck)
        Add = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, MulV);
      if (NeedNegCheck)
        Sub = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, NegMulV);
    } else {
      if (NeedPosCheck)
        Add = Builder.CreateAdd(StartValue, MulV);
      if (NeedNegCheck)
        Sub = Builder.CreateSub(StartValue, MulV);
    }

    Value *EndCompareLT = nullptr;
    Value *EndCompareGT = nullptr;
    Value *EndCheck = nullptr;
    if (NeedPosCheck)
      EndCheck = EndCompareLT = Builder.CreateICmp(
          Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
    if (NeedNegCheck)
      EndCheck = EndCompareGT = Builder.CreateICmp(
          Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
    if (NeedPosCheck && NeedNegCheck) {
      // Select the answer based on the sign of Step.
      EndCheck = Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
    }
    return Builder.CreateOr(EndCheck, OfMul);
  };
  Value *EndCheck = ComputeEndCheck();

  // If the backedge taken count type is larger than the AR type,
  // check that we don't drop any bits by truncating it. If we are
  // dropping bits, then we have overflow (unless the step is zero).
  if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
    auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
    auto *BackedgeCheck =
        Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
                           ConstantInt::get(Loc->getContext(), MaxVal));
    BackedgeCheck = Builder.CreateAnd(
        BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));

    EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
  }

  return EndCheck;
}

Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
                                         Instruction *IP) {
  const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
  Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;

  // Add a check for NUSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
    NUSWCheck = generateOverflowCheck(A, IP, false);

  // Add a check for NSSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
    NSSWCheck = generateOverflowCheck(A, IP, true);

  if (NUSWCheck && NSSWCheck)
    return Builder.CreateOr(NUSWCheck, NSSWCheck);

  if (NUSWCheck)
    return NUSWCheck;

  if (NSSWCheck)
    return NSSWCheck;

  return ConstantInt::getFalse(IP->getContext());
}

Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
                                          Instruction *IP) {
  // Loop over all checks in this set.
  SmallVector<Value *> Checks;
  for (auto Pred : Union->getPredicates()) {
    Checks.push_back(expandCodeForPredicate(Pred, IP));
    Builder.SetInsertPoint(IP);
  }

  if (Checks.empty())
    return ConstantInt::getFalse(IP->getContext());
  return Builder.CreateOr(Checks);
}

Value *SCEVExpander::fixupLCSSAFormFor(Instruction *User, unsigned OpIdx) {
  assert(PreserveLCSSA);
  SmallVector<Instruction *, 1> ToUpdate;

  auto *OpV = User->getOperand(OpIdx);
  auto *OpI = dyn_cast<Instruction>(OpV);
  if (!OpI)
    return OpV;

  Loop *DefLoop = SE.LI.getLoopFor(OpI->getParent());
  Loop *UseLoop = SE.LI.getLoopFor(User->getParent());
  if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
    return OpV;

  ToUpdate.push_back(OpI);
  SmallVector<PHINode *, 16> PHIsToRemove;
  formLCSSAForInstructions(ToUpdate, SE.DT, SE.LI, &SE, Builder, &PHIsToRemove);
  for (PHINode *PN : PHIsToRemove) {
    if (!PN->use_empty())
      continue;
    InsertedValues.erase(PN);
    InsertedPostIncValues.erase(PN);
    PN->eraseFromParent();
  }

  return User->getOperand(OpIdx);
}
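
// For context, LCSSA form requires that any value used outside its defining
// loop is funneled through a phi in an exit block, e.g. (hypothetical IR):
//   exit:
//     %v.lcssa = phi i64 [ %v, %latch ]
// formLCSSAForInstructions establishes exactly this for the operand above.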

namespace {
// Search for a SCEV subexpression that is not safe to expand. Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check is
// only needed when the expression includes some subexpression that is not IV
// derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
//
// We cannot generally expand recurrences unless the step dominates the loop
// header. The expander handles the special case of affine recurrences by
// scaling the recurrence outside the loop, but this technique isn't generally
// applicable. Expanding a nested recurrence outside a loop requires computing
// binomial coefficients. This could be done, but the recurrence has to be in a
// perfectly reduced form, which can't be guaranteed.
struct SCEVFindUnsafe {
  ScalarEvolution &SE;
  bool CanonicalMode;
  bool IsUnsafe;

  SCEVFindUnsafe(ScalarEvolution &SE, bool CanonicalMode)
      : SE(SE), CanonicalMode(CanonicalMode), IsUnsafe(false) {}

  bool follow(const SCEV *S) {
    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
      if (!SC || SC->getValue()->isZero()) {
        IsUnsafe = true;
        return false;
      }
    }
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
      const SCEV *Step = AR->getStepRecurrence(SE);
      if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
        IsUnsafe = true;
        return false;
      }

      // For non-affine addrecs or in non-canonical mode we need a preheader
      // to insert into.
      if (!AR->getLoop()->getLoopPreheader() &&
          (!CanonicalMode || !AR->isAffine())) {
        IsUnsafe = true;
        return false;
      }
    }
    return true;
  }
  bool isDone() const { return IsUnsafe; }
};
} // namespace

namespace llvm {
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE, bool CanonicalMode) {
  SCEVFindUnsafe Search(SE, CanonicalMode);
  visitAll(S, Search);
  return !Search.IsUnsafe;
}

bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
                      ScalarEvolution &SE) {
  if (!isSafeToExpand(S, SE))
    return false;
  // We have to prove that the expanded site of S dominates InsertionPoint.
  // This is easy when not in the same block, but hard when S is an instruction
  // to be expanded somewhere inside the same block as our insertion point.
  // What we really need here is something analogous to an OrderedBasicBlock,
  // but for the moment, we paper over the problem by handling two common and
  // cheap to check cases.
  if (SE.properlyDominates(S, InsertionPoint->getParent()))
    return true;
  if (SE.dominates(S, InsertionPoint->getParent())) {
    if (InsertionPoint->getParent()->getTerminator() == InsertionPoint)
      return true;
    if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
      if (llvm::is_contained(InsertionPoint->operand_values(), U->getValue()))
        return true;
  }
  return false;
}
} // namespace llvm

void SCEVExpanderCleaner::cleanup() {
  // Result is used, nothing to remove.
  if (ResultUsed)
    return;

  auto InsertedInstructions = Expander.getAllInsertedInstructions();
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 8> InsertedSet(InsertedInstructions.begin(),
                                            InsertedInstructions.end());
  (void)InsertedSet;
#endif
  // Remove sets with value handles.
  Expander.clear();

  // Remove all inserted instructions.
  for (Instruction *I : reverse(InsertedInstructions)) {
#ifndef NDEBUG
    assert(all_of(I->users(),
                  [&InsertedSet](Value *U) {
                    return InsertedSet.contains(cast<Instruction>(U));
                  }) &&
           "removed instruction should only be used by instructions inserted "
           "during expansion");
#endif
    assert(!I->getType()->isVoidTy() &&
           "inserted instruction should have non-void types");
    I->replaceAllUsesWith(UndefValue::get(I->getType()));
    I->eraseFromParent();
  }
}