//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
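//
// As an illustrative sketch (not this pass's literal output), with a
// vectorization factor (VF) of 4, a scalar loop such as
//
//   for (i = 0; i < n; ++i)
//     A[i] = B[i] + 42;
//
// is conceptually rewritten so that each 'wide' iteration processes four
// consecutive elements with vector instructions:
//
//   for (i = 0; i + 3 < n; i += 4)
//     A[i:i+3] = B[i:i+3] + <42, 42, 42, 42>; // one SIMD add and store
//   // remaining iterations run in a scalar epilogue loop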
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
#include <tuple>
using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));
/// We don't vectorize loops with a known constant trip count below this number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));
static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;
static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;
static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalarized loops."));
static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));
static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));
static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));
// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));
/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));
/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                     Instruction *I = nullptr) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}
// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;
/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      return true;
    }
  }
  return false;
}
/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}
// FIXME: The following helper functions have multiple implementations
// in the project. They can be effectively organized in a common Load/Store
// helper class.

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}
/// A helper function that returns the type of a loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns the alignment of a load or store instruction.
static unsigned getMemInstAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlignment();
  return cast<StoreInst>(I)->getAlignment();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
static unsigned getMemInstAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}
/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {

  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
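
// For example (illustrative, assuming a typical x86-64 data layout): x86_fp80
// stores 10 bytes but is allocated 16 bytes, so an array of VF x86_fp80
// values is not bitcast compatible with <VF x x86_fp80>; the type is
// irregular and is not safe to widen with a plain vector load or store.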
/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }
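
// For example (a sketch of how this constant is applied): a cost model that
// charges Cost(I) for an instruction in a predicated block would charge
// Cost(I) / getReciprocalPredBlockProb(), i.e. Cost(I) / 2, per iteration of
// the loop header under the 50% execution assumption.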
/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setUnsafeAlgebra();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}
/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), VectorLoopValueMap(UnrollFactor, VecWidth),
        TripCount(nullptr), VectorTripCount(nullptr), Legal(LVL), Cost(CM),
        AddedSafetyChecks(false) {}
  // Perform the actual loop widening (vectorization).
  void vectorize() {
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  typedef SmallVector<Value *, 2> VectorParts;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  typedef SmallVector<SmallVector<Value *, 4>, 2> ScalarParts;
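
  // As an illustrative sketch: with UF = 2 and VF = 4, a vectorized value is
  // held as VectorParts {V0, V1}, two <4 x Ty> vectors (one per unrolled
  // part), while a scalarized value is held as ScalarParts
  // {{S00, S01, S02, S03}, {S10, S11, S12, S13}}, one scalar per part and
  // vector lane.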
  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCacheTy;
  typedef DenseMap<BasicBlock *, VectorParts> BlockMaskCacheTy;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);
  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Predicate conditional instructions that require predication on their
  /// respective conditions.
  void predicateInstructions();

  /// Collect the instructions from the original loop that would be trivially
  /// dead in the vectorized loop if generated.
  void collectTriviallyDeadInstructions(
      SmallPtrSetImpl<Instruction *> &DeadInstructions);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);

  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single instruction within the innermost
  /// loop.
  void vectorizeInstruction(Instruction &I);
  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateInstr is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateInstr = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);
  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variable.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);
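
  // For example (illustrative): with VF = 4, StartIdx = 0, and Step = 1,
  // getStepVector applied to a broadcast <i, i, i, i> produces
  // <i, i+1, i+2, i+3>, the values the scalar IV would take in four
  // consecutive iterations.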
  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;
  /// Return a constant reference to the VectorParts corresponding to \p V from
  /// the original loop. If the value has already been vectorized, the
  /// corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// new vector values on-demand by inserting the scalar values into vectors
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into vectors.
  const VectorParts &getVectorValue(Value *V);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part and vector index \p Lane. If the value has
  /// been vectorized but not scalarized, the necessary extractelement
  /// instruction will be generated.
  Value *getScalarValue(Value *V, unsigned Part, unsigned Lane);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// \brief Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
  /// This is a helper class for maintaining vectorization state. It's used for
  /// mapping values from the original loop to their corresponding values in
  /// the new loop. Two mappings are maintained: one for vectorized values and
  /// one for scalarized values. Vectorized values are represented with UF
  /// vector values in the new loop, and scalarized values are represented with
  /// UF x VF scalar values in the new loop. UF and VF are the unroll and
  /// vectorization factors, respectively.
  ///
  /// Entries can be added to either map with initVector and initScalar, which
  /// initialize and return a constant reference to the new entry. If a
  /// non-constant reference to a vector entry is required, getVector can be
  /// used to retrieve a mutable entry. We currently directly modify the mapped
  /// values during "fix-up" operations that occur once the first phase of
  /// widening is complete. These operations include type truncation and the
  /// second phase of recurrence widening.
  ///
  /// Otherwise, entries from either map should be accessed using the
  /// getVectorValue or getScalarValue functions from InnerLoopVectorizer.
  /// getVectorValue and getScalarValue coordinate to generate a vector or
  /// scalar value on-demand if one is not yet available. When vectorizing a
  /// loop, we visit the definition of an instruction before its uses. When
  /// visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
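  ///
  /// As an illustrative sketch of the on-demand path: if a definition %d was
  /// scalarized with UF = 1 and VF = 4, a later vector use of %d triggers
  /// construction of roughly
  ///
  ///   %v0 = insertelement <4 x Ty> undef, Ty %d0, i32 0
  ///   %v1 = insertelement <4 x Ty> %v0,   Ty %d1, i32 1
  ///   %v2 = insertelement <4 x Ty> %v1,   Ty %d2, i32 2
  ///   %v3 = insertelement <4 x Ty> %v2,   Ty %d3, i32 3
  ///
  /// after which %v3 is cached as the vector value for %d.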
  struct ValueMap {

    /// Construct an empty map with the given unroll and vectorization factors.
    ValueMap(unsigned UnrollFactor, unsigned VecWidth)
        : UF(UnrollFactor), VF(VecWidth) {
      // The unroll and vectorization factors are only used in asserts builds
      // to verify map entries are sized appropriately.
      (void)UF;
      (void)VF;
    }
    /// \return True if the map has a vector entry for \p Key.
    bool hasVector(Value *Key) const { return VectorMapStorage.count(Key); }

    /// \return True if the map has a scalar entry for \p Key.
    bool hasScalar(Value *Key) const { return ScalarMapStorage.count(Key); }

    /// \brief Map \p Key to the given VectorParts \p Entry, and return a
    /// constant reference to the new vector map entry. The given key should
    /// not already be in the map, and the given VectorParts should be
    /// correctly sized for the current unroll factor.
    const VectorParts &initVector(Value *Key, const VectorParts &Entry) {
      assert(!hasVector(Key) && "Vector entry already initialized");
      assert(Entry.size() == UF && "VectorParts has wrong dimensions");
      VectorMapStorage[Key] = Entry;
      return VectorMapStorage[Key];
    }

    /// \brief Map \p Key to the given ScalarParts \p Entry, and return a
    /// constant reference to the new scalar map entry. The given key should
    /// not already be in the map, and the given ScalarParts should be
    /// correctly sized for the current unroll and vectorization factors.
    const ScalarParts &initScalar(Value *Key, const ScalarParts &Entry) {
      assert(!hasScalar(Key) && "Scalar entry already initialized");
      assert(Entry.size() == UF &&
             all_of(make_range(Entry.begin(), Entry.end()),
                    [&](const SmallVectorImpl<Value *> &Values) -> bool {
                      return Values.size() == VF;
                    }) &&
             "ScalarParts has wrong dimensions");
      ScalarMapStorage[Key] = Entry;
      return ScalarMapStorage[Key];
    }
    /// \return A reference to the vector map entry corresponding to \p Key.
    /// The key should already be in the map. This function should only be used
    /// when it's necessary to update values that have already been vectorized.
    /// This is the case for "fix-up" operations including type truncation and
    /// the second phase of recurrence vectorization. If a non-const reference
    /// isn't required, getVectorValue should be used instead.
    VectorParts &getVector(Value *Key) {
      assert(hasVector(Key) && "Vector entry not initialized");
      return VectorMapStorage.find(Key)->second;
    }

    /// Retrieve an entry from the vector or scalar maps. The preferred way to
    /// access an existing mapped entry is with getVectorValue or
    /// getScalarValue from InnerLoopVectorizer. Until those functions can be
    /// moved inside ValueMap, we have to declare them as friends.
    friend const VectorParts &InnerLoopVectorizer::getVectorValue(Value *V);
    friend Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part,
                                                      unsigned Lane);

  private:
    /// The unroll factor. Each entry in the vector map contains UF vector
    /// values.
    unsigned UF;

    /// The vectorization factor. Each entry in the scalar map contains UF x VF
    /// scalar values.
    unsigned VF;

    /// The vector and scalar map storage. We use std::map and not DenseMap
    /// because insertions to DenseMap invalidate its iterators.
    std::map<Value *, VectorParts> VectorMapStorage;
    std::map<Value *, ScalarParts> ScalarMapStorage;
  };
  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;
  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;

  /// The induction variable of the old basic block.
  PHINode *OldInduction;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  ValueMap VectorLoopValueMap;

  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<Instruction *, Value *>, 4> PredicatedInstructions;
  EdgeMaskCacheTy EdgeMaskCache;
  BlockMaskCacheTy BlockMaskCache;

  /// Trip count of the original loop.
  Value *TripCount;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;
};
class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};
/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling())
      B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
    else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}
/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }
  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is related to the leader and it could be
  /// negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The index span of the group is always less than the interleave
      // factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }
  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }
private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32          // Insert Position
  //      %add = add i32 %even      // Use of %even
  //      %odd = load i32
  //      store i32 %even
  //      %odd = add i32            // Def of %odd
  //      store i32 %odd            // Insert Position
  Instruction *InsertPos;
};
/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do analysis as the vectorization
/// on interleaved accesses is unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
        RequiresScalarEpilogue(false) {}
  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }
  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    unsigned MaxFactor = 1;
    for (auto &Entry : InterleaveGroupMap)
      MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
    return MaxFactor;
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if doesn't have such group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }
private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;
  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() = default;

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;
    const SCEV *Scev = nullptr; // The scalar expression of this access.
    uint64_t Size = 0;          // The size of the memory object.
    unsigned Align = 0;         // The alignment of this access.
  };

  /// \brief A type for holding instructions and their stride descriptors.
  typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;
  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }
  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }
  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering is
  /// not necessary or is prevented because \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {

    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }
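
  // As an illustrative example of the reordering this permits: given
  //
  //   A: store i32 %x, i32* %p    ; unstrided store
  //   B: %y = load i32, i32* %q   ; member of a strided load group
  //
  // hoisting B's widened load above A is allowed only when the recorded
  // dependences show no dependence from A to B.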
  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};
/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked so.
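///
/// For example (an illustrative sketch of the metadata this class reads and
/// writes; the hint names are Prefix() plus the Hint names below), a loop
/// compiled with "#pragma clang loop vectorize_width(4) interleave_count(2)"
/// carries IR such as:
///
///   br i1 %cond, label %loop, label %exit, !llvm.loop !0
///   !0 = distinct !{!0, !1, !2}
///   !1 = !{!"llvm.loop.vectorize.width", i32 4}
///   !2 = !{!"llvm.loop.interleave.count", i32 2}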
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };
  /// Vectorization width.
  Hint Width;

  /// Vectorization interleave factor.
  Hint Interleave;

  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };
  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }
  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emit(OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                          "AllDisabled", L->getStartLoc(),
                                          L->getHeader())
               << "loop not vectorized: vectorization and interleaving are "
                  "explicitly disabled, or vectorize width and interleave "
                  "count are both set to 1");
      return false;
    }

    return true;
  }
  /// Dumps all the hint information.
  void emitRemarkWithHints() const {
    using namespace ore;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      ORE.emit(OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
               << "loop not vectorized: vectorization is explicitly disabled");
    else {
      OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
                                 TheLoop->getStartLoc(), TheLoop->getHeader());
      R << "loop not vectorized";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=" << NV("Force", true);
        if (Width.Value != 0)
          R << ", Vector Width=" << NV("VectorWidth", Width.Value);
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << NV("InterleaveCount", Interleave.Value);
        R << ")";
      }
      ORE.emit(R);
    }
  }
  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return OptimizationRemarkAnalysis::AlwaysPrint;
  }
  bool allowReordering() const {
    // When loop hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations that is given by the
    // scalar loop. This is not enabled by default because it can be unsafe or
    // inefficient. For example, reordering floating-point operations will
    // change the way round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }
private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }
  /// Checks string hint with one operand and set value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }
  /// Matches metadata with hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }
  /// Sets current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.size() == 0)
      return;

    // Reserve the first element to LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If node in update list, ignore old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace current metadata node with new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }
1510 /// The loop these hints belong to.
1511 const Loop *TheLoop;
1513 /// Interface to emit optimization remarks.
1514 OptimizationRemarkEmitter &ORE;
1517 static void emitMissedWarning(Function *F, Loop *L,
1518 const LoopVectorizeHints &LH,
1519 OptimizationRemarkEmitter *ORE) {
1520 LH.emitRemarkWithHints();
1522 if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
1523 if (LH.getWidth() != 1)
1524 ORE->emit(DiagnosticInfoOptimizationFailure(
1525 DEBUG_TYPE, "FailedRequestedVectorization",
1526 L->getStartLoc(), L->getHeader())
1527 << "loop not vectorized: "
1528 << "failed explicitly specified loop vectorization");
1529 else if (LH.getInterleave() != 1)
ORE->emit(DiagnosticInfoOptimizationFailure(
              DEBUG_TYPE, "FailedRequestedInterleaving", L->getStartLoc(),
              L->getHeader())
          << "loop not interleaved: "
          << "failed explicitly specified loop interleaving");
}
}
1538 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
1539 /// to what vectorization factor.
1540 /// This class does not look at the profitability of vectorization, only the
1541 /// legality. This class has two main kinds of checks:
1542 /// * Memory checks - The code in canVectorizeMemory checks if vectorization
1543 /// will change the order of memory accesses in a way that will change the
1544 /// correctness of the program.
/// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
/// checks for a number of different conditions, such as the availability of a
/// single induction variable, that all types are supported and vectorizable,
/// etc. This code reflects the capabilities of InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
1553 LoopVectorizationLegality(
1554 Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
1555 TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
1556 const TargetTransformInfo *TTI,
1557 std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
1558 OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
1559 LoopVectorizeHints *H)
1560 : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT),
1561 GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI),
1562 PrimaryInduction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
1563 Requirements(R), Hints(H) {}
1565 /// ReductionList contains the reduction descriptors for all
1566 /// of the reductions that were found in the loop.
1567 typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;
1569 /// InductionList saves induction variables and maps them to the
1570 /// induction descriptor.
1571 typedef MapVector<PHINode *, InductionDescriptor> InductionList;
1573 /// RecurrenceSet contains the phi nodes that are recurrences other than
1574 /// inductions and reductions.
1575 typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;
1577 /// Returns true if it is legal to vectorize this loop.
1578 /// This does not mean that it is profitable to vectorize this
1579 /// loop, only that it is legal to do so.
1580 bool canVectorize();
1582 /// Returns the primary induction variable.
1583 PHINode *getPrimaryInduction() { return PrimaryInduction; }
1585 /// Returns the reduction variables found in the loop.
1586 ReductionList *getReductionVars() { return &Reductions; }
1588 /// Returns the induction variables found in the loop.
1589 InductionList *getInductionVars() { return &Inductions; }
1591 /// Return the first-order recurrences found in the loop.
1592 RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }
1594 /// Returns the widest induction type.
1595 Type *getWidestInductionType() { return WidestIndTy; }
1597 /// Returns True if V is an induction variable in this loop.
1598 bool isInductionVariable(const Value *V);
1600 /// Returns True if PN is a reduction variable in this loop.
1601 bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }
1603 /// Returns True if Phi is a first-order recurrence in this loop.
1604 bool isFirstOrderRecurrence(const PHINode *Phi);
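// For example (an illustrative sketch), in
//   t = init;
//   for (i = 0; i < n; ++i) { b[i] = a[i] + t; t = a[i]; }
// the phi feeding 't' is a first-order recurrence: each iteration reads the
// value of a[i] produced by the previous iteration.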
1606 /// Return true if the block BB needs to be predicated in order for the loop
1607 /// to be vectorized.
1608 bool blockNeedsPredication(BasicBlock *BB);
/// Check if this pointer is consecutive when vectorizing. This happens
/// when the last index of the GEP is the induction variable, or when the
/// pointer itself is an induction variable.
1613 /// This check allows us to vectorize A[idx] into a wide load/store.
/// Returns:
/// 0 - Stride is unknown or non-consecutive.
1616 /// 1 - Address is consecutive.
1617 /// -1 - Address is consecutive, and decreasing.
1618 int isConsecutivePtr(Value *Ptr);
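// For example (an illustrative sketch), given
//   for (i = 0; i < n; ++i) { A[i] = x; B[n - i] = y; C[2 * i] = z; }
// &A[i] is consecutive (returns 1), &B[n - i] is consecutive but decreasing
// (returns -1), and &C[2 * i] has a non-unit stride (returns 0).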
1620 /// Returns true if the value V is uniform within the loop.
1621 bool isUniform(Value *V);
1623 /// Returns the information that we collected about runtime memory check.
1624 const RuntimePointerChecking *getRuntimePointerChecking() const {
1625 return LAI->getRuntimePointerChecking();
1628 const LoopAccessInfo *getLAI() const { return LAI; }
1630 /// \brief Check if \p Instr belongs to any interleaved access group.
1631 bool isAccessInterleaved(Instruction *Instr) {
1632 return InterleaveInfo.isInterleaved(Instr);
1635 /// \brief Return the maximum interleave factor of all interleaved groups.
1636 unsigned getMaxInterleaveFactor() const {
1637 return InterleaveInfo.getMaxInterleaveFactor();
1640 /// \brief Get the interleaved access group that \p Instr belongs to.
1641 const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
1642 return InterleaveInfo.getInterleaveGroup(Instr);
1645 /// \brief Returns true if an interleaved group requires a scalar iteration
1646 /// to handle accesses with gaps.
1647 bool requiresScalarEpilogue() const {
1648 return InterleaveInfo.requiresScalarEpilogue();
1651 unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }
1653 bool hasStride(Value *V) { return LAI->hasStride(V); }
1655 /// Returns true if the target machine supports masked store operation
1656 /// for the given \p DataType and kind of access to \p Ptr.
1657 bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
}
1660 /// Returns true if the target machine supports masked load operation
1661 /// for the given \p DataType and kind of access to \p Ptr.
1662 bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
}
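// For example (an illustrative sketch), when these hooks return true an
// if-converted conditional store such as
//   if (cond[i]) A[i] = x;
// can be emitted as a masked intrinsic instead of a scalarized branch:
//   call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %x, <4 x i32>* %p,
//                                              i32 4, <4 x i1> %mask)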
1665 /// Returns true if the target machine supports masked scatter operation
1666 /// for the given \p DataType.
1667 bool isLegalMaskedScatter(Type *DataType) {
1668 return TTI->isLegalMaskedScatter(DataType);
1670 /// Returns true if the target machine supports masked gather operation
1671 /// for the given \p DataType.
1672 bool isLegalMaskedGather(Type *DataType) {
1673 return TTI->isLegalMaskedGather(DataType);
1675 /// Returns true if the target machine can represent \p V as a masked gather
1676 /// or scatter operation.
1677 bool isLegalGatherOrScatter(Value *V) {
auto *LI = dyn_cast<LoadInst>(V);
auto *SI = dyn_cast<StoreInst>(V);
if (!LI && !SI)
  return false;
auto *Ptr = getPointerOperand(V);
1683 auto *Ty = cast<PointerType>(Ptr->getType())->getElementType();
1684 return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
/// Returns true if vector representation of the instruction \p I
/// requires a mask.
1689 bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
1690 unsigned getNumStores() const { return LAI->getNumStores(); }
1691 unsigned getNumLoads() const { return LAI->getNumLoads(); }
1692 unsigned getNumPredStores() const { return NumPredStores; }
1694 /// Returns true if \p I is an instruction that will be scalarized with
1695 /// predication. Such instructions include conditional stores and
1696 /// instructions that may divide by zero.
1697 bool isScalarWithPredication(Instruction *I);
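// For example (an illustrative sketch), in
//   for (i = 0; i < n; ++i)
//     if (b[i] != 0) a[i] = a[i] / b[i];
// the division may trap for lanes whose predicate is false, so it is
// replicated as scalar instructions, each guarded by a branch on its lane's
// mask bit.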
1699 /// Returns true if \p I is a memory instruction with consecutive memory
1700 /// access that can be widened.
1701 bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
private:
/// Check if a single basic block loop is vectorizable.
1705 /// At this point we know that this is a loop with a constant trip count
1706 /// and we only need to check individual instructions.
1707 bool canVectorizeInstrs();
1709 /// When we vectorize loops we may change the order in which
1710 /// we read and write from memory. This method checks if it is
/// legal to vectorize the code, considering only memory constraints.
/// Returns true if the loop is vectorizable.
1713 bool canVectorizeMemory();
/// Return true if we can vectorize this loop using the IF-conversion
/// transformation.
bool canVectorizeWithIfConvert();
1719 /// Return true if all of the instructions in the block can be speculatively
1720 /// executed. \p SafePtrs is a list of addresses that are known to be legal
1721 /// and we know that we can read from them without segfault.
1722 bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);
1724 /// Updates the vectorization state by adding \p Phi to the inductions list.
1725 /// This can set \p Phi as the main induction of the loop if \p Phi is a
1726 /// better choice for the main induction than the existing one.
1727 void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
1728 SmallPtrSetImpl<Value *> &AllowedExit);
/// Create an analysis remark that explains why vectorization failed
///
/// \p RemarkName is the identifier for the remark. If \p I is passed it is
/// an instruction that prevents vectorization. Otherwise the loop is used
/// for the location of the remark. \return the remark object that can be
/// streamed to.
1736 OptimizationRemarkAnalysis
1737 createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const {
1738 return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
1739 RemarkName, TheLoop, I);
/// \brief If an access has a symbolic stride, this maps the pointer value to
1743 /// the stride symbol.
1744 const ValueToValueMap *getSymbolicStrides() {
1745 // FIXME: Currently, the set of symbolic strides is sometimes queried before
1746 // it's collected. This happens from canVectorizeWithIfConvert, when the
// pointer is checked to reference consecutive elements suitable for a
// wide load/store.
return LAI ? &LAI->getSymbolicStrides() : nullptr;
1752 unsigned NumPredStores;
/// The loop that we evaluate.
Loop *TheLoop;
1756 /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
1757 /// Applies dynamic knowledge to simplify SCEV expressions in the context
1758 /// of existing SCEV assumptions. The analysis will also add a minimal set
/// of new predicates if this is required to enable vectorization and
/// unrolling.
PredicatedScalarEvolution &PSE;
1762 /// Target Library Info.
1763 TargetLibraryInfo *TLI;
1764 /// Target Transform Info
const TargetTransformInfo *TTI;
/// Dominator Tree.
DominatorTree *DT;

// LoopAccess analysis.
1769 std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
1770 // And the loop-accesses info corresponding to this loop. This pointer is
1771 // null until canVectorizeMemory sets it up.
1772 const LoopAccessInfo *LAI;
1773 /// Interface to emit optimization remarks.
1774 OptimizationRemarkEmitter *ORE;
1776 /// The interleave access information contains groups of interleaved accesses
1777 /// with the same stride and close to each other.
1778 InterleavedAccessInfo InterleaveInfo;
1780 // --- vectorization state --- //
/// Holds the primary induction variable. This is the counter of the
/// vectorized loop.
PHINode *PrimaryInduction;
1785 /// Holds the reduction variables.
1786 ReductionList Reductions;
1787 /// Holds all of the induction variables that we found in the loop.
1788 /// Notice that inductions don't need to start at zero and that induction
1789 /// variables can be pointers.
1790 InductionList Inductions;
1791 /// Holds the phi nodes that are first-order recurrences.
1792 RecurrenceSet FirstOrderRecurrences;
/// Holds the widest induction type encountered.
Type *WidestIndTy;
1796 /// Allowed outside users. This holds the induction and reduction
1797 /// vars which can be accessed from outside the loop.
1798 SmallPtrSet<Value *, 4> AllowedExit;
1800 /// Can we assume the absence of NaNs.
1801 bool HasFunNoNaNAttr;
1803 /// Vectorization requirements that will go through late-evaluation.
1804 LoopVectorizationRequirements *Requirements;
1806 /// Used to emit an analysis of any legality issues.
1807 LoopVectorizeHints *Hints;
1809 /// While vectorizing these instructions we have to generate a
1810 /// call to the appropriate masked intrinsic
1811 SmallPtrSet<const Instruction *, 8> MaskedOp;
/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
1816 /// In many cases vectorization is not profitable. This can happen because of
1817 /// a number of reasons. In this class we mainly attempt to predict the
1818 /// expected speedup/slowdowns due to the supported instruction set. We use the
1819 /// TargetTransformInfo to query the different backends for the cost of
1820 /// different operations.
class LoopVectorizationCostModel {
public:
1823 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
1824 LoopInfo *LI, LoopVectorizationLegality *Legal,
1825 const TargetTransformInfo &TTI,
1826 const TargetLibraryInfo *TLI, DemandedBits *DB,
1827 AssumptionCache *AC,
1828 OptimizationRemarkEmitter *ORE, const Function *F,
1829 const LoopVectorizeHints *Hints)
1830 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
1831 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {}
1833 /// \return An upper bound for the vectorization factor, or None if
1834 /// vectorization should be avoided up front.
1835 Optional<unsigned> computeMaxVF(bool OptForSize);
1837 /// Information about vectorization costs
1838 struct VectorizationFactor {
1839 unsigned Width; // Vector width with best cost
unsigned Cost; // Cost of the loop with that width
};
1842 /// \return The most profitable vectorization factor and the cost of that VF.
/// This method checks every power of two up to MaxVF. If UserVF is not ZERO
/// then this vectorization factor will be selected if vectorization is
/// possible.
VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
1848 /// Setup cost-based decisions for user vectorization factor.
1849 void selectUserVectorizationFactor(unsigned UserVF) {
1850 collectUniformsAndScalars(UserVF);
1851 collectInstsToScalarize(UserVF);
1854 /// \return The size (in bits) of the smallest and widest types in the code
1855 /// that needs to be vectorized. We ignore values that remain scalar such as
1856 /// 64 bit loop indices.
1857 std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1859 /// \return The desired interleave count.
1860 /// If interleave count has been specified by metadata it will be returned.
1861 /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1862 /// are the selected vectorization factor and the cost of the selected VF.
unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                               unsigned LoopCost);
/// A memory access instruction may be vectorized in more than one way. The
/// form of the instruction after vectorization depends on its cost. This
/// function takes cost-based decisions for Load/Store instructions
/// and collects them in a map. This decision map is used for building
/// the lists of loop-uniform and loop-scalar instructions.
1871 /// The calculated cost is saved with widening decision in order to
1872 /// avoid redundant calculations.
1873 void setCostBasedWideningDecision(unsigned VF);
/// \brief A struct that represents some properties of the register usage
/// of a loop.
1877 struct RegisterUsage {
1878 /// Holds the number of loop invariant values that are used in the loop.
1879 unsigned LoopInvariantRegs;
1880 /// Holds the maximum number of concurrent live intervals in the loop.
1881 unsigned MaxLocalUsers;
1882 /// Holds the number of instructions in the loop.
unsigned NumInstructions;
};
1886 /// \return Returns information about the register usages of the loop for the
1887 /// given vectorization factors.
1888 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1890 /// Collect values we want to ignore in the cost model.
1891 void collectValuesToIgnore();
/// \returns The smallest bitwidth each instruction can be represented with.
/// The vector equivalents of these instructions should be truncated to this
/// type.
const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
  return MinBWs;
}
1900 /// \returns True if it is more profitable to scalarize instruction \p I for
1901 /// vectorization factor \p VF.
1902 bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
1903 auto Scalars = InstsToScalarize.find(VF);
1904 assert(Scalars != InstsToScalarize.end() &&
1905 "VF not yet analyzed for scalarization profitability");
1906 return Scalars->second.count(I);
1909 /// Returns true if \p I is known to be uniform after vectorization.
bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
  if (VF == 1)
    return true;
  assert(Uniforms.count(VF) && "VF not yet analyzed for uniformity");
1914 auto UniformsPerVF = Uniforms.find(VF);
1915 return UniformsPerVF->second.count(I);
1918 /// Returns true if \p I is known to be scalar after vectorization.
bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
  if (VF == 1)
    return true;
  assert(Scalars.count(VF) && "Scalar values are not calculated for VF");
1923 auto ScalarsPerVF = Scalars.find(VF);
1924 return ScalarsPerVF->second.count(I);
1927 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1928 /// for vectorization factor \p VF.
1929 bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1930 return VF > 1 && MinBWs.count(I) && !isProfitableToScalarize(I, VF) &&
1931 !isScalarAfterVectorization(I, VF);
/// Decision that was taken during cost calculation for memory instruction.
enum InstWidening {
  CM_Unknown,
  CM_Widen,
  CM_Interleave,
  CM_GatherScatter,
  CM_Scalarize
};
1943 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1944 /// instruction \p I and vector width \p VF.
void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
                         unsigned Cost) {
1947 assert(VF >= 2 && "Expected VF >=2");
1948 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1951 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1952 /// interleaving group \p Grp and vector width \p VF.
1953 void setWideningDecision(const InterleaveGroup *Grp, unsigned VF,
1954 InstWidening W, unsigned Cost) {
1955 assert(VF >= 2 && "Expected VF >=2");
/// Broadcast this decision to all instructions inside the group.
/// But the cost will be assigned to one instruction only.
1958 for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1959 if (auto *I = Grp->getMember(i)) {
if (Grp->getInsertPos() == I)
  WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
else
  WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1968 /// Return the cost model decision for the given instruction \p I and vector
1969 /// width \p VF. Return CM_Unknown if this instruction did not pass
1970 /// through the cost modeling.
1971 InstWidening getWideningDecision(Instruction *I, unsigned VF) {
1972 assert(VF >= 2 && "Expected VF >=2");
1973 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1974 auto Itr = WideningDecisions.find(InstOnVF);
if (Itr == WideningDecisions.end())
  return CM_Unknown;
return Itr->second.first;
}
/// Return the vectorization cost for the given instruction \p I and vector
/// width \p VF.
1982 unsigned getWideningCost(Instruction *I, unsigned VF) {
1983 assert(VF >= 2 && "Expected VF >=2");
1984 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1985 assert(WideningDecisions.count(InstOnVF) && "The cost is not calculated");
return WideningDecisions[InstOnVF].second;
}
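// For example (an illustrative sketch), after setCostBasedWideningDecision
// runs for VF = 4, the map might record {(%load, 4) -> (CM_Interleave, 10)};
// vectorizeMemoryInstruction later queries getWideningDecision and steers
// that load to vectorizeInterleaveGroup.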
1989 /// Return True if instruction \p I is an optimizable truncate whose operand
1990 /// is an induction variable. Such a truncate will be removed by adding a new
1991 /// induction variable with the destination type.
1992 bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
// If the instruction is not a truncate, return false.
auto *Trunc = dyn_cast<TruncInst>(I);
if (!Trunc)
  return false;
1999 // Get the source and destination types of the truncate.
2000 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
2001 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
2003 // If the truncate is free for the given types, return false. Replacing a
2004 // free truncate with an induction variable would add an induction variable
2005 // update instruction to each iteration of the loop. We exclude from this
2006 // check the primary induction variable since it will need an update
2007 // instruction regardless.
2008 Value *Op = Trunc->getOperand(0);
if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
  return false;
2012 // If the truncated value is not an induction variable, return false.
return Legal->isInductionVariable(Op);
}
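// For example (an illustrative sketch), in
//   for (i64 i = 0; i < n; ++i) a[i] = (i32)i;
// the trunc of the i64 induction to i32 can be removed by introducing a new
// i32 induction variable that produces the truncated values directly.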
private:
/// \return An upper bound for the vectorization factor, larger than zero.
2018 /// One is returned if vectorization should best be avoided due to cost.
2019 unsigned computeFeasibleMaxVF(bool OptForSize);
/// The vectorization cost is a combination of the cost itself and a boolean
/// indicating whether any of the contributing operations will actually
/// operate on vector values after type legalization in the backend. If this
/// latter value is false, then all operations will be scalarized (i.e. no
/// vectorization has actually taken place).
typedef std::pair<unsigned, bool> VectorizationCostTy;
2030 /// Returns the expected execution cost. The unit of the cost does
2031 /// not matter because we use the 'cost' units to compare different
2032 /// vector widths. The cost that is returned is *not* normalized by
2033 /// the factor width.
2034 VectorizationCostTy expectedCost(unsigned VF);
2036 /// Returns the execution time cost of an instruction for a given vector
2037 /// width. Vector width of one means scalar.
2038 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
2040 /// The cost-computation logic from getInstructionCost which provides
2041 /// the vector type as an output parameter.
2042 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
2044 /// Calculate vectorization cost of memory instruction \p I.
2045 unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
2047 /// The cost computation for scalarized memory instruction.
2048 unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
2050 /// The cost computation for interleaving group of memory instructions.
2051 unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
2053 /// The cost computation for Gather/Scatter instruction.
2054 unsigned getGatherScatterCost(Instruction *I, unsigned VF);
/// The cost computation for widening instruction \p I with consecutive
/// memory access.
unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
2060 /// The cost calculation for Load instruction \p I with uniform pointer -
2061 /// scalar load + broadcast.
2062 unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
/// Returns whether the instruction is a load or store and will be emitted
/// as a vector operation.
2066 bool isConsecutiveLoadOrStore(Instruction *I);
2068 /// Create an analysis remark that explains why vectorization failed
2070 /// \p RemarkName is the identifier for the remark. \return the remark object
2071 /// that can be streamed to.
2072 OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
2073 return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
2074 RemarkName, TheLoop);
/// Map of scalar integer values to the smallest bitwidth they can be legally
/// represented as. The vector equivalents of these values should be truncated
/// to this type.
MapVector<Instruction *, uint64_t> MinBWs;
2082 /// A type representing the costs for instructions if they were to be
/// scalarized rather than vectorized. The entries are Instruction-Cost
/// pairs.
typedef DenseMap<Instruction *, unsigned> ScalarCostsTy;
/// A set containing all BasicBlocks that are known to be present after
/// vectorization as predicated blocks.
2089 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
2091 /// A map holding scalar costs for different vectorization factors. The
2092 /// presence of a cost for an instruction in the mapping indicates that the
2093 /// instruction will be scalarized when vectorizing with the associated
2094 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
2095 DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
2097 /// Holds the instructions known to be uniform after vectorization.
2098 /// The data is collected per VF.
2099 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
2101 /// Holds the instructions known to be scalar after vectorization.
2102 /// The data is collected per VF.
2103 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
2105 /// Returns the expected difference in cost from scalarizing the expression
2106 /// feeding a predicated instruction \p PredInst. The instructions to
2107 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
2108 /// non-negative return value implies the expression will be scalarized.
2109 /// Currently, only single-use chains are considered for scalarization.
int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                            unsigned VF);
/// Collects the instructions to scalarize for each predicated instruction in
/// the loop.
void collectInstsToScalarize(unsigned VF);
2117 /// Collect the instructions that are uniform after vectorization. An
2118 /// instruction is uniform if we represent it with a single scalar value in
2119 /// the vectorized loop corresponding to each vector iteration. Examples of
2120 /// uniform instructions include pointer operands of consecutive or
2121 /// interleaved memory accesses. Note that although uniformity implies an
2122 /// instruction will be scalar, the reverse is not true. In general, a
2123 /// scalarized instruction will be represented by VF scalar values in the
/// vectorized loop, each corresponding to an iteration of the original
/// scalar loop.
void collectLoopUniforms(unsigned VF);
2128 /// Collect the instructions that are scalar after vectorization. An
2129 /// instruction is scalar if it is known to be uniform or will be scalarized
2130 /// during vectorization. Non-uniform scalarized instructions will be
2131 /// represented by VF values in the vectorized loop, each corresponding to an
2132 /// iteration of the original scalar loop.
2133 void collectLoopScalars(unsigned VF);
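// For example (an illustrative sketch), in
//   %gep = getelementptr inbounds i32, i32* %a, i64 %iv
//   %v = load i32, i32* %gep
// a consecutive %gep remains uniform: the widened load only needs the
// lane-0 address per unroll part, whereas a non-uniform scalarized
// instruction would be replicated VF times, once per lane.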
2135 /// Collect Uniform and Scalar values for the given \p VF.
/// The sets depend on the CM decision for Load/Store instructions
/// that may be vectorized as an interleave group, gather-scatter or
/// scalarized.
2138 void collectUniformsAndScalars(unsigned VF) {
2139 // Do the analysis once.
if (VF == 1 || Uniforms.count(VF))
  return;
setCostBasedWideningDecision(VF);
2143 collectLoopUniforms(VF);
2144 collectLoopScalars(VF);
2147 /// Keeps cost model vectorization decision and cost for instructions.
2148 /// Right now it is used for memory instructions only.
2149 typedef DenseMap<std::pair<Instruction *, unsigned>,
                 std::pair<InstWidening, unsigned>>
    DecisionList;

DecisionList WideningDecisions;
/// The loop that we evaluate.
Loop *TheLoop;
2158 /// Predicated scalar evolution analysis.
2159 PredicatedScalarEvolution &PSE;
/// Loop Info analysis.
LoopInfo *LI;
2162 /// Vectorization legality.
2163 LoopVectorizationLegality *Legal;
2164 /// Vector target information.
2165 const TargetTransformInfo &TTI;
2166 /// Target Library Info.
2167 const TargetLibraryInfo *TLI;
/// Demanded bits analysis.
DemandedBits *DB;
2170 /// Assumption cache.
2171 AssumptionCache *AC;
2172 /// Interface to emit optimization remarks.
2173 OptimizationRemarkEmitter *ORE;
2175 const Function *TheFunction;
2176 /// Loop Vectorize Hint.
2177 const LoopVectorizeHints *Hints;
2178 /// Values to ignore in the cost model.
2179 SmallPtrSet<const Value *, 16> ValuesToIgnore;
2180 /// Values to ignore in the cost model when VF > 1.
2181 SmallPtrSet<const Value *, 16> VecValuesToIgnore;
2184 /// LoopVectorizationPlanner - drives the vectorization process after having
2185 /// passed Legality checks.
2186 class LoopVectorizationPlanner {
2188 LoopVectorizationPlanner(LoopVectorizationCostModel &CM) : CM(CM) {}
2190 ~LoopVectorizationPlanner() {}
2192 /// Plan how to best vectorize, return the best VF and its cost.
LoopVectorizationCostModel::VectorizationFactor plan(bool OptForSize,
                                                     unsigned UserVF);
private:
/// The profitability analysis.
2198 LoopVectorizationCostModel &CM;
2201 /// \brief This holds vectorization requirements that must be verified late in
2202 /// the process. The requirements are set by legalize and costmodel. Once
2203 /// vectorization has been determined to be possible and profitable the
2204 /// requirements can be verified by looking for metadata or compiler options.
2205 /// For example, some loops require FP commutativity which is only allowed if
2206 /// vectorization is explicitly specified or if the fast-math compiler option
2207 /// has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop,
/// for example by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated
/// that can be followed by a non-expert user.
class LoopVectorizationRequirements {
public:
2215 LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE)
2216 : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {}
2218 void addUnsafeAlgebraInst(Instruction *I) {
2219 // First unsafe algebra instruction.
2220 if (!UnsafeAlgebraInst)
2221 UnsafeAlgebraInst = I;
2224 void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
2226 bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
2227 const char *PassName = Hints.vectorizeAnalysisPassName();
2228 bool Failed = false;
if (UnsafeAlgebraInst && !Hints.allowReordering()) {
  ORE.emit(OptimizationRemarkAnalysisFPCommute(
               PassName, "CantReorderFPOps", UnsafeAlgebraInst->getDebugLoc(),
               UnsafeAlgebraInst->getParent())
           << "loop not vectorized: cannot prove it is safe to reorder "
              "floating-point operations");
  Failed = true;
}
2239 // Test if runtime memcheck thresholds are exceeded.
2240 bool PragmaThresholdReached =
2241 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
2242 bool ThresholdReached =
2243 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
2244 if ((ThresholdReached && !Hints.allowReordering()) ||
2245 PragmaThresholdReached) {
ORE.emit(OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps",
                                            L->getStartLoc(), L->getHeader())
         << "loop not vectorized: cannot prove it is safe to reorder "
            "memory operations");
DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
Failed = true;
}

return Failed;
}
private:
unsigned NumRuntimePointerChecks;
2260 Instruction *UnsafeAlgebraInst;
2262 /// Interface to emit optimization remarks.
2263 OptimizationRemarkEmitter &ORE;
static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) {
  if (L.empty()) {
    if (!hasCyclesInLoopBody(L))
      V.push_back(&L);
    return;
  }
  for (Loop *InnerL : L)
    addAcyclicInnerLoop(*InnerL, V);
}
2276 /// The LoopVectorize Pass.
2277 struct LoopVectorize : public FunctionPass {
/// Pass identification, replacement for typeid
static char ID;
2281 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
2282 : FunctionPass(ID) {
2283 Impl.DisableUnrolling = NoUnrolling;
2284 Impl.AlwaysVectorize = AlwaysVectorize;
2285 initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2288 LoopVectorizePass Impl;
2290 bool runOnFunction(Function &F) override {
if (skipFunction(F))
  return false;
2294 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2295 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2296 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2297 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2298 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2299 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2300 auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
2301 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2302 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2303 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2304 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2305 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2307 std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2308 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
                    GetLAA, *ORE);
}
2314 void getAnalysisUsage(AnalysisUsage &AU) const override {
2315 AU.addRequired<AssumptionCacheTracker>();
2316 AU.addRequired<BlockFrequencyInfoWrapperPass>();
2317 AU.addRequired<DominatorTreeWrapperPass>();
2318 AU.addRequired<LoopInfoWrapperPass>();
2319 AU.addRequired<ScalarEvolutionWrapperPass>();
2320 AU.addRequired<TargetTransformInfoWrapperPass>();
2321 AU.addRequired<AAResultsWrapperPass>();
2322 AU.addRequired<LoopAccessLegacyAnalysis>();
2323 AU.addRequired<DemandedBitsWrapperPass>();
2324 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2325 AU.addPreserved<LoopInfoWrapperPass>();
2326 AU.addPreserved<DominatorTreeWrapperPass>();
2327 AU.addPreserved<BasicAAWrapperPass>();
2328 AU.addPreserved<GlobalsAAWrapperPass>();
2332 } // end anonymous namespace
2334 //===----------------------------------------------------------------------===//
2335 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2336 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2337 //===----------------------------------------------------------------------===//
2339 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2340 // We need to place the broadcast of invariant variables outside the loop.
2341 Instruction *Instr = dyn_cast<Instruction>(V);
2342 bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
2343 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;
2345 // Place the code for broadcasting invariant variables in the new preheader.
IRBuilder<>::InsertPointGuard Guard(Builder);
if (Invariant)
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

// Broadcast the scalar into all locations in the vector.
Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

return Shuf;
}
2356 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2357 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
2358 Value *Start = II.getStartValue();
2360 // Construct the initial value of the vector IV in the vector loop preheader
2361 auto CurrIP = Builder.saveIP();
2362 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2363 if (isa<TruncInst>(EntryVal)) {
2364 assert(Start->getType()->isIntegerTy() &&
2365 "Truncation requires an integer type");
2366 auto *TruncType = cast<IntegerType>(EntryVal->getType());
2367 Step = Builder.CreateTrunc(Step, TruncType);
2368 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2370 Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2371 Value *SteppedStart =
2372 getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2374 // We create vector phi nodes for both integer and floating-point induction
2375 // variables. Here, we determine the kind of arithmetic we will perform.
2376 Instruction::BinaryOps AddOp;
2377 Instruction::BinaryOps MulOp;
2378 if (Step->getType()->isIntegerTy()) {
2379 AddOp = Instruction::Add;
  MulOp = Instruction::Mul;
} else {
  AddOp = II.getInductionOpcode();
2383 MulOp = Instruction::FMul;
2386 // Multiply the vectorization factor by the step using integer or
2387 // floating-point arithmetic as appropriate.
2388 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
2389 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
2391 // Create a vector splat to use in the induction update.
2393 // FIXME: If the step is non-constant, we create the vector splat with
2394 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2395 // handle a constant vector splat.
2396 Value *SplatVF = isa<Constant>(Mul)
2397 ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2398 : Builder.CreateVectorSplat(VF, Mul);
2399 Builder.restoreIP(CurrIP);
2401 // We may need to add the step a number of times, depending on the unroll
2402 // factor. The last of those goes into the PHI.
2403 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2404 &*LoopVectorBody->getFirstInsertionPt());
2405 Instruction *LastInduction = VecInd;
2406 VectorParts Entry(UF);
2407 for (unsigned Part = 0; Part < UF; ++Part) {
2408 Entry[Part] = LastInduction;
2409 LastInduction = cast<Instruction>(addFastMathFlag(
2410 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
2412 VectorLoopValueMap.initVector(EntryVal, Entry);
2413 if (isa<TruncInst>(EntryVal))
2414 addMetadata(Entry, EntryVal);
2416 // Move the last step to the end of the latch block. This ensures consistent
2417 // placement of all induction updates.
2418 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2419 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2420 auto *ICmp = cast<Instruction>(Br->getCondition());
2421 LastInduction->moveBefore(ICmp);
2422 LastInduction->setName("vec.ind.next");
2424 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2425 VecInd->addIncoming(LastInduction, LoopVectorLatch);
2428 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2429 return Cost->isScalarAfterVectorization(I, VF) ||
2430 Cost->isProfitableToScalarize(I, VF);
2433 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
if (shouldScalarizeInstruction(IV))
  return true;
auto isScalarInst = [&](User *U) -> bool {
  auto *I = cast<Instruction>(U);
  return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
};
return any_of(IV->users(), isScalarInst);
}
2443 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
2445 assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2446 "Primary induction variable must have an integer type");
2448 auto II = Legal->getInductionVars()->find(IV);
2449 assert(II != Legal->getInductionVars()->end() && "IV is not an induction");
2451 auto ID = II->second;
2452 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2454 // The scalar value to broadcast. This will be derived from the canonical
2455 // induction variable.
2456 Value *ScalarIV = nullptr;
// The value from the original loop to which we are mapping the new induction
// variable.
2460 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2462 // True if we have vectorized the induction variable.
2463 auto VectorizedIV = false;
2465 // Determine if we want a scalar version of the induction variable. This is
2466 // true if the induction variable itself is not widened, or if it has at
2467 // least one user in the loop that is not widened.
2468 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal);
2470 // Generate code for the induction step. Note that induction steps are
2471 // required to be loop-invariant
2472 assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) &&
2473 "Induction step should be loop invariant");
2474 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2475 Value *Step = nullptr;
2476 if (PSE.getSE()->isSCEVable(IV->getType())) {
2477 SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2478 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
2479 LoopVectorPreHeader->getTerminator());
} else {
  Step = cast<SCEVUnknown>(ID.getStep())->getValue();
}
// Try to create a new independent vector induction variable. If we can't
// create the phi node, we will splat the scalar induction variable in each
// loop iteration.
if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) {
2488 createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
2489 VectorizedIV = true;
2492 // If we haven't yet vectorized the induction variable, or if we will create
2493 // a scalar one, we need to define the scalar induction variable and step
2494 // values. If we were given a truncation type, truncate the canonical
2495 // induction variable and step. Otherwise, derive these values from the
2496 // induction descriptor.
2497 if (!VectorizedIV || NeedsScalarIV) {
2498 ScalarIV = Induction;
2499 if (IV != OldInduction) {
  ScalarIV = IV->getType()->isIntegerTy()
                 ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
                 : Builder.CreateCast(Instruction::SIToFP, Induction,
                                      IV->getType());
  ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL);
  ScalarIV->setName("offset.idx");
}
if (Trunc) {
  auto *TruncType = cast<IntegerType>(Trunc->getType());
2509 assert(Step->getType()->isIntegerTy() &&
2510 "Truncation requires an integer step");
2511 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
  Step = Builder.CreateTrunc(Step, TruncType);
}
}
2516 // If we haven't yet vectorized the induction variable, splat the scalar
2517 // induction variable, and build the necessary step vectors.
2518 if (!VectorizedIV) {
2519 Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2520 VectorParts Entry(UF);
for (unsigned Part = 0; Part < UF; ++Part)
  Entry[Part] =
      getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
VectorLoopValueMap.initVector(EntryVal, Entry);
if (Trunc)
  addMetadata(Entry, Trunc);
}
2529 // If an induction variable is only used for counting loop iterations or
2530 // calculating addresses, it doesn't need to be widened. Create scalar steps
2531 // that can be used by instructions we will later scalarize. Note that the
2532 // addition of the scalar steps will not increase the number of instructions
2533 // in the loop in the common case prior to InstCombine. We will be trading
2534 // one vector extract for each scalar step.
if (NeedsScalarIV)
  buildScalarSteps(ScalarIV, Step, EntryVal, ID);
}
2539 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2540 Instruction::BinaryOps BinOp) {
2541 // Create and check the types.
2542 assert(Val->getType()->isVectorTy() && "Must be a vector");
2543 int VLen = Val->getType()->getVectorNumElements();
2545 Type *STy = Val->getType()->getScalarType();
2546 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2547 "Induction Step must be an integer or FP");
2548 assert(Step->getType() == STy && "Step has wrong type");
2550 SmallVector<Constant *, 8> Indices;
2552 if (STy->isIntegerTy()) {
2553 // Create a vector of consecutive numbers from zero to VF.
2554 for (int i = 0; i < VLen; ++i)
2555 Indices.push_back(ConstantInt::get(STy, StartIdx + i));
2557 // Add the consecutive indices to the vector value.
2558 Constant *Cv = ConstantVector::get(Indices);
2559 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
2560 Step = Builder.CreateVectorSplat(VLen, Step);
2561 assert(Step->getType() == Val->getType() && "Invalid step vec");
2562 // FIXME: The newly created binary instructions should contain nsw/nuw flags,
2563 // which can be found from the original scalar operations.
2564 Step = Builder.CreateMul(Cv, Step);
2565 return Builder.CreateAdd(Val, Step, "induction");
2568 // Floating point induction.
2569 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2570 "Binary Opcode should be specified for FP induction");
2571 // Create a vector of consecutive numbers from zero to VF.
2572 for (int i = 0; i < VLen; ++i)
2573 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
2575 // Add the consecutive indices to the vector value.
2576 Constant *Cv = ConstantVector::get(Indices);
2578 Step = Builder.CreateVectorSplat(VLen, Step);
2580 // Floating point operations had to be 'fast' to enable the induction.
2581 FastMathFlags Flags;
2582 Flags.setUnsafeAlgebra();
2584 Value *MulOp = Builder.CreateFMul(Cv, Step);
2585 if (isa<Instruction>(MulOp))
2586 // Have to check, MulOp may be a constant
2587 cast<Instruction>(MulOp)->setFastMathFlags(Flags);
2589 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
if (isa<Instruction>(BOp))
  cast<Instruction>(BOp)->setFastMathFlags(Flags);
return BOp;
}
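// For example (an illustrative sketch), for VF = 4, StartIdx = 0, an integer
// step %s and Val = <%x, %x, %x, %x>, getStepVector emits roughly:
//   %0 = mul <4 x i32> <i32 0, i32 1, i32 2, i32 3>, %s.splat
//   %induction = add <4 x i32> <%x, %x, %x, %x>, %0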
2595 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2597 const InductionDescriptor &ID) {
2599 // We shouldn't have to build scalar steps if we aren't vectorizing.
2600 assert(VF > 1 && "VF should be greater than one");
2602 // Get the value type and ensure it and the step have the same integer type.
2603 Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2604 assert(ScalarIVTy == Step->getType() &&
2605 "Val and Step should have the same type");
2607 // We build scalar steps for both integer and floating-point induction
2608 // variables. Here, we determine the kind of arithmetic we will perform.
2609 Instruction::BinaryOps AddOp;
2610 Instruction::BinaryOps MulOp;
2611 if (ScalarIVTy->isIntegerTy()) {
2612 AddOp = Instruction::Add;
2613 MulOp = Instruction::Mul;
2615 AddOp = ID.getInductionOpcode();
2616 MulOp = Instruction::FMul;
2619 // Determine the number of scalars we need to generate for each unroll
2620 // iteration. If EntryVal is uniform, we only need to generate the first
2621 // lane. Otherwise, we generate all VF values.
unsigned Lanes =
    Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
                                                                       : VF;
2625 // Compute the scalar steps and save the results in VectorLoopValueMap.
2626 ScalarParts Entry(UF);
2627 for (unsigned Part = 0; Part < UF; ++Part) {
2628 Entry[Part].resize(VF);
2629 for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2630 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
2631 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2632 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2633 Entry[Part][Lane] = Add;
VectorLoopValueMap.initScalar(EntryVal, Entry);
}
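// For example (an illustrative sketch), with UF = 2, VF = 4 and an integer
// induction, the non-uniform case materializes eight scalars of the form
// %offset.idx + (4 * Part + Lane) * %step, e.g. %offset.idx + 5 * %step for
// Part = 1, Lane = 1.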
2639 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
const ValueToValueMap &Strides =
    getSymbolicStrides() ? *getSymbolicStrides() : ValueToValueMap();

int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false);
if (Stride == 1 || Stride == -1)
  return Stride;
return 0;
}
2650 bool LoopVectorizationLegality::isUniform(Value *V) {
2651 return LAI->isUniform(V);
2654 const InnerLoopVectorizer::VectorParts &
2655 InnerLoopVectorizer::getVectorValue(Value *V) {
2656 assert(V != Induction && "The new induction variable should not be used.");
2657 assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2658 assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2660 // If we have a stride that is replaced by one, do it here.
2661 if (Legal->hasStride(V))
2662 V = ConstantInt::get(V->getType(), 1);
2664 // If we have this scalar in the map, return it.
2665 if (VectorLoopValueMap.hasVector(V))
2666 return VectorLoopValueMap.VectorMapStorage[V];
2668 // If the value has not been vectorized, check if it has been scalarized
2669 // instead. If it has been scalarized, and we actually need the value in
2670 // vector form, we will construct the vector values on demand.
2671 if (VectorLoopValueMap.hasScalar(V)) {
2673 // Initialize a new vector map entry.
2674 VectorParts Entry(UF);
2676 // If we've scalarized a value, that value should be an instruction.
2677 auto *I = cast<Instruction>(V);
// If we aren't vectorizing, we can just copy the scalar map values over to
// the vector map.
if (VF == 1) {
  for (unsigned Part = 0; Part < UF; ++Part)
    Entry[Part] = getScalarValue(V, Part, 0);
  return VectorLoopValueMap.initVector(V, Entry);
}
2687 // Get the last scalar instruction we generated for V. If the value is
2688 // known to be uniform after vectorization, this corresponds to lane zero
2689 // of the last unroll iteration. Otherwise, the last instruction is the one
2690 // we created for the last vector lane of the last unroll iteration.
2691 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
2692 auto *LastInst = cast<Instruction>(getScalarValue(V, UF - 1, LastLane));
2694 // Set the insert point after the last scalarized instruction. This ensures
2695 // the insertelement sequence will directly follow the scalar definitions.
2696 auto OldIP = Builder.saveIP();
2697 auto NewIP = std::next(BasicBlock::iterator(LastInst));
2698 Builder.SetInsertPoint(&*NewIP);
2700 // However, if we are vectorizing, we need to construct the vector values.
2701 // If the value is known to be uniform after vectorization, we can just
2702 // broadcast the scalar value corresponding to lane zero for each unroll
2703 // iteration. Otherwise, we construct the vector values using insertelement
2704 // instructions. Since the resulting vectors are stored in
2705 // VectorLoopValueMap, we will only generate the insertelements once.
2706 for (unsigned Part = 0; Part < UF; ++Part) {
2707 Value *VectorValue = nullptr;
2708 if (Cost->isUniformAfterVectorization(I, VF)) {
  VectorValue = getBroadcastInstrs(getScalarValue(V, Part, 0));
} else {
  VectorValue = UndefValue::get(VectorType::get(V->getType(), VF));
2712 for (unsigned Lane = 0; Lane < VF; ++Lane)
2713 VectorValue = Builder.CreateInsertElement(
2714 VectorValue, getScalarValue(V, Part, Lane),
2715 Builder.getInt32(Lane));
2717 Entry[Part] = VectorValue;
2719 Builder.restoreIP(OldIP);
2720 return VectorLoopValueMap.initVector(V, Entry);
2723 // If this scalar is unknown, assume that it is a constant or that it is
2724 // loop invariant. Broadcast V and save the value for future uses.
2725 Value *B = getBroadcastInstrs(V);
2726 return VectorLoopValueMap.initVector(V, VectorParts(UF, B));
Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part,
                                           unsigned Lane) {
2732 // If the value is not an instruction contained in the loop, it should
2733 // already be scalar.
if (OrigLoop->isLoopInvariant(V))
  return V;

assert(Lane > 0 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)
                : true && "Uniform values only have lane zero");
2741 // If the value from the original loop has not been vectorized, it is
// represented by UF x VF scalar values in the new loop. Return the requested
// scalar value.
if (VectorLoopValueMap.hasScalar(V))
2745 return VectorLoopValueMap.ScalarMapStorage[V][Part][Lane];
2747 // If the value has not been scalarized, get its entry in VectorLoopValueMap
2748 // for the given unroll part. If this entry is not a vector type (i.e., the
2749 // vectorization factor is one), there is no need to generate an
2750 // extractelement instruction.
2751 auto *U = getVectorValue(V)[Part];
2752 if (!U->getType()->isVectorTy()) {
  assert(VF == 1 && "Value not scalarized has non-vector type");
  return U;
}
2757 // Otherwise, the value from the original loop has been vectorized and is
2758 // represented by UF vector values. Extract and return the requested scalar
2759 // value from the appropriate vector lane.
2760 return Builder.CreateExtractElement(U, Builder.getInt32(Lane));
2763 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2764 assert(Vec->getType()->isVectorTy() && "Invalid type");
2765 SmallVector<Constant *, 8> ShuffleMask;
2766 for (unsigned i = 0; i < VF; ++i)
2767 ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
                                   ConstantVector::get(ShuffleMask),
                                   "reverse");
}
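// For example (an illustrative sketch), with VF = 4 the shuffle mask is
// <i32 3, i32 2, i32 1, i32 0>, so <a, b, c, d> becomes <d, c, b, a>.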
2774 // Try to vectorize the interleave group that \p Instr belongs to.
2776 // E.g. Translate following interleaved load group (factor = 3):
2777 // for (i = 0; i < N; i+=3) {
2778 // R = Pic[i]; // Member of index 0
2779 // G = Pic[i+1]; // Member of index 1
2780 // B = Pic[i+2]; // Member of index 2
//       ... // do something to R, G, B
//     }
//   To:
//     %wide.vec = load <12 x i32>    ; Read 4 tuples of R,G,B
2785 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements
2786 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements
2787 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements
2789 // Or translate following interleaved store group (factor = 3):
2790 // for (i = 0; i < N; i+=3) {
2791 // ... do something to R, G, B
2792 // Pic[i] = R; // Member of index 0
2793 // Pic[i+1] = G; // Member of index 1
//       Pic[i+2] = B;           // Member of index 2
//     }
//   To:
//     %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2798 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2799 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2800 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
2801 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
2802 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
2803 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
2804 assert(Group && "Fail to get an interleaved access group.");
2806 // Skip if current instruction is not the insert position.
if (Instr != Group->getInsertPos())
  return;
2810 Value *Ptr = getPointerOperand(Instr);
2812 // Prepare for the vector type of the interleaved load/store.
2813 Type *ScalarTy = getMemInstValueType(Instr);
2814 unsigned InterleaveFactor = Group->getFactor();
2815 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2816 Type *PtrTy = VecTy->getPointerTo(getMemInstAddressSpace(Instr));
2818 // Prepare for the new pointers.
2819 setDebugLocFromInst(Builder, Ptr);
2820 SmallVector<Value *, 2> NewPtrs;
2821 unsigned Index = Group->getIndex(Instr);
2823 // If the group is reverse, adjust the index to refer to the last vector lane
2824 // instead of the first. We adjust the index from the first vector lane,
2825 // rather than directly getting the pointer for lane VF - 1, because the
2826 // pointer operand of the interleaved access is supposed to be uniform. For
2827 // uniform instructions, we're only required to generate a value for the
2828 // first vector lane in each unroll iteration.
2829 if (Group->isReverse())
2830 Index += (VF - 1) * Group->getFactor();
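// For example (an illustrative sketch), for a reverse group with factor 3
// and VF = 4, a member with index 0 becomes 0 + (4 - 1) * 3 = 9, i.e. the
// lane-0 pointer is moved to where the reversed wide access must begin.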
2832 for (unsigned Part = 0; Part < UF; Part++) {
2833 Value *NewPtr = getScalarValue(Ptr, Part, 0);
// Notice that the current instruction could have any member index. We need
// to adjust the address to that of the member with index 0.
//
// E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
//       b = A[i];       // Member of index 0
// The current pointer points to A[i+1]; adjust it to A[i].
//
// E.g.  A[i+1] = a;     // Member of index 1
//       A[i]   = b;     // Member of index 0
//       A[i+2] = c;     // Member of index 2 (Current instruction)
// The current pointer points to A[i+2]; adjust it to A[i].
2846 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2848 // Cast to the vector pointer type.
2849 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2852 setDebugLocFromInst(Builder, Instr);
2853 Value *UndefVec = UndefValue::get(VecTy);
2855 // Vectorize the interleaved load group.
2856 if (isa<LoadInst>(Instr)) {
2858 // For each unroll part, create a wide load for the group.
2859 SmallVector<Value *, 2> NewLoads;
2860 for (unsigned Part = 0; Part < UF; Part++) {
2861 auto *NewLoad = Builder.CreateAlignedLoad(
2862 NewPtrs[Part], Group->getAlignment(), "wide.vec");
2863 addMetadata(NewLoad, Instr);
2864 NewLoads.push_back(NewLoad);
// For each member in the group, shuffle out the appropriate data from the
// wide loads.
for (unsigned I = 0; I < InterleaveFactor; ++I) {
Instruction *Member = Group->getMember(I);

// Skip the gaps in the group.
if (!Member)
  continue;

VectorParts Entry(UF);
2877 Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
2878 for (unsigned Part = 0; Part < UF; Part++) {
2879 Value *StridedVec = Builder.CreateShuffleVector(
2880 NewLoads[Part], UndefVec, StrideMask, "strided.vec");
  // If this member has a different type, cast the result type.
  if (Member->getType() != ScalarTy) {
    VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
    StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
  }

  Entry[Part] =
      Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
}
VectorLoopValueMap.initVector(Member, Entry);
}
return;
}
2896 // The sub vector type for current instruction.
2897 VectorType *SubVT = VectorType::get(ScalarTy, VF);
2899 // Vectorize the interleaved store group.
2900 for (unsigned Part = 0; Part < UF; Part++) {
2901 // Collect the stored vector from each member.
2902 SmallVector<Value *, 4> StoredVecs;
2903 for (unsigned i = 0; i < InterleaveFactor; i++) {
2904 // Interleaved store groups don't allow gaps, so each index has a member.
2905 Instruction *Member = Group->getMember(i);
2906 assert(Member && "Failed to get a member from an interleaved store group");
2908 Value *StoredVec =
2909 getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part];
2910 if (Group->isReverse())
2911 StoredVec = reverseVector(StoredVec);
2913 // If this member has a different type, cast it to the unified type.
2914 if (StoredVec->getType() != SubVT)
2915 StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);
2917 StoredVecs.push_back(StoredVec);
2918 }
2920 // Concatenate all vectors into a wide vector.
2921 Value *WideVec = concatenateVectors(Builder, StoredVecs);
2923 // Interleave the elements in the wide vector.
2924 Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor);
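// Illustrative example (assumed semantics of createInterleaveMask, not from
// the original source): for VF 4 and factor 2, the mask is
// <0, 4, 1, 5, 2, 6, 3, 7>, merging the two concatenated member vectors
// {A0..A3, B0..B3} into memory order {A0, B0, A1, B1, A2, B2, A3, B3}.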
2925 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2926 "interleaved.vec");
2928 Instruction *NewStoreInstr =
2929 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
2930 addMetadata(NewStoreInstr, Instr);
2931 }
2932 }
2934 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
2935 // Attempt to issue a wide load.
2936 LoadInst *LI = dyn_cast<LoadInst>(Instr);
2937 StoreInst *SI = dyn_cast<StoreInst>(Instr);
2939 assert((LI || SI) && "Invalid Load/Store instruction");
2941 LoopVectorizationCostModel::InstWidening Decision =
2942 Cost->getWideningDecision(Instr, VF);
2943 assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
2944 "CM decision should be taken at this point");
2945 if (Decision == LoopVectorizationCostModel::CM_Interleave)
2946 return vectorizeInterleaveGroup(Instr);
2948 Type *ScalarDataTy = getMemInstValueType(Instr);
2949 Type *DataTy = VectorType::get(ScalarDataTy, VF);
2950 Value *Ptr = getPointerOperand(Instr);
2951 unsigned Alignment = getMemInstAlignment(Instr);
2952 // An alignment of 0 means target ABI alignment. We need to use the scalar's
2953 // target ABI alignment in such a case.
2954 const DataLayout &DL = Instr->getModule()->getDataLayout();
2955 if (!Alignment)
2956 Alignment = DL.getABITypeAlignment(ScalarDataTy);
2957 unsigned AddressSpace = getMemInstAddressSpace(Instr);
2959 // Scalarize the memory instruction if necessary.
2960 if (Decision == LoopVectorizationCostModel::CM_Scalarize)
2961 return scalarizeInstruction(Instr, Legal->isScalarWithPredication(Instr));
2963 // Determine if the pointer operand of the access is either consecutive or
2964 // reverse consecutive.
2965 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
2966 bool Reverse = ConsecutiveStride < 0;
2967 bool CreateGatherScatter =
2968 (Decision == LoopVectorizationCostModel::CM_GatherScatter);
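// Illustrative note (not in the original): a stride of +1 produces one wide
// load/store per unroll part; a stride of -1 produces a wide access followed
// by a reverse shuffle; and a non-consecutive pointer that the cost model
// chose not to scalarize becomes a masked gather/scatter below.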
2970 VectorParts VectorGep;
2972 // Handle consecutive loads/stores.
2973 if (ConsecutiveStride) {
2974 Ptr = getScalarValue(Ptr, 0, 0);
2975 } else {
2976 // At this point we should have the vector version of the GEP for gather or scatter.
2977 assert(CreateGatherScatter && "The instruction should be scalarized");
2978 VectorGep = getVectorValue(Ptr);
2979 }
2981 VectorParts Mask = createBlockInMask(Instr->getParent());
2982 // Handle stores.
2983 if (SI) {
2984 assert(!Legal->isUniform(SI->getPointerOperand()) &&
2985 "We do not allow storing to uniform addresses");
2986 setDebugLocFromInst(Builder, SI);
2987 // We don't want to update the value in the map as it might be used in
2988 // another expression. So don't use a reference type for "StoredVal".
2989 VectorParts StoredVal = getVectorValue(SI->getValueOperand());
2991 for (unsigned Part = 0; Part < UF; ++Part) {
2992 Instruction *NewSI = nullptr;
2993 if (CreateGatherScatter) {
2994 Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr;
2995 NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part],
2996 Alignment, MaskPart);
2997 } else {
2998 // Calculate the pointer for the specific unroll-part.
2999 Value *PartPtr =
3000 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
3002 if (Reverse) {
3003 // If we store to reverse consecutive memory locations, then we need
3004 // to reverse the order of elements in the stored value.
3005 StoredVal[Part] = reverseVector(StoredVal[Part]);
3006 // If the address is consecutive but reversed, then the
3007 // wide store needs to start at the last vector element.
3008 PartPtr =
3009 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
3010 PartPtr =
3011 Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
3012 Mask[Part] = reverseVector(Mask[Part]);
3013 }
3015 Value *VecPtr =
3016 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
3018 if (Legal->isMaskRequired(SI))
3019 NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment,
3020 Mask[Part]);
3021 else
3022 NewSI =
3023 Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment);
3024 }
3025 addMetadata(NewSI, SI);
3026 }
3027 return;
3028 }
3030 // Handle loads.
3031 assert(LI && "Must have a load instruction");
3032 setDebugLocFromInst(Builder, LI);
3033 VectorParts Entry(UF);
3034 for (unsigned Part = 0; Part < UF; ++Part) {
3035 Value *NewLI;
3036 if (CreateGatherScatter) {
3037 Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr;
3038 NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart,
3039 0, "wide.masked.gather");
3040 Entry[Part] = NewLI;
3041 } else {
3042 // Calculate the pointer for the specific unroll-part.
3043 Value *PartPtr =
3044 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
3046 if (Reverse) {
3047 // If the address is consecutive but reversed, then the
3048 // wide load needs to start at the last vector element.
3049 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
3050 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
3051 Mask[Part] = reverseVector(Mask[Part]);
3052 }
3054 Value *VecPtr =
3055 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
3056 if (Legal->isMaskRequired(LI))
3057 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
3058 UndefValue::get(DataTy),
3059 "wide.masked.load");
3060 else
3061 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
3062 Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
3063 }
3064 addMetadata(NewLI, LI);
3065 }
3066 VectorLoopValueMap.initVector(Instr, Entry);
3067 }
3069 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
3070 bool IfPredicateInstr) {
3071 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3072 DEBUG(dbgs() << "LV: Scalarizing"
3073 << (IfPredicateInstr ? " and predicating:" : ":") << *Instr
3074 << '\n');
3075 // Holds vector parameters or scalars, in case of uniform vals.
3076 SmallVector<VectorParts, 4> Params;
3078 setDebugLocFromInst(Builder, Instr);
3080 // Does this instruction return a value ?
3081 bool IsVoidRetTy = Instr->getType()->isVoidTy();
3083 // Initialize a new scalar map entry.
3084 ScalarParts Entry(UF);
3087 if (IfPredicateInstr)
3088 Cond = createBlockInMask(Instr->getParent());
3090 // Determine the number of scalars we need to generate for each unroll
3091 // iteration. If the instruction is uniform, we only need to generate the
3092 // first lane. Otherwise, we generate all VF values.
3093 unsigned Lanes = Cost->isUniformAfterVectorization(Instr, VF) ? 1 : VF;
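// Illustrative example (not in the original): with VF = 4 and UF = 2, a
// uniform instruction is cloned twice (lane 0 of each unroll part), while a
// non-uniform one is cloned eight times (4 lanes x 2 parts).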
3095 // For each vector unroll 'part':
3096 for (unsigned Part = 0; Part < UF; ++Part) {
3097 Entry[Part].resize(VF);
3098 // For each scalar that we create:
3099 for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
3101 // Start if-block.
3102 Value *Cmp = nullptr;
3103 if (IfPredicateInstr) {
3104 Cmp = Cond[Part];
3105 if (Cmp->getType()->isVectorTy())
3106 Cmp = Builder.CreateExtractElement(Cmp, Builder.getInt32(Lane));
3107 Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
3108 ConstantInt::get(Cmp->getType(), 1));
3109 }
3111 Instruction *Cloned = Instr->clone();
3112 if (!IsVoidRetTy)
3113 Cloned->setName(Instr->getName() + ".cloned");
3115 // Replace the operands of the cloned instructions with their scalar
3116 // equivalents in the new loop.
3117 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
3118 auto *NewOp = getScalarValue(Instr->getOperand(op), Part, Lane);
3119 Cloned->setOperand(op, NewOp);
3120 }
3121 addNewMetadata(Cloned, Instr);
3123 // Place the cloned scalar in the new loop.
3124 Builder.Insert(Cloned);
3126 // Add the cloned scalar to the scalar map entry.
3127 Entry[Part][Lane] = Cloned;
3129 // If we just cloned a new assumption, add it to the assumption cache.
3130 if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
3131 if (II->getIntrinsicID() == Intrinsic::assume)
3132 AC->registerAssumption(II);
3134 // End if-block.
3135 if (IfPredicateInstr)
3136 PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
3137 }
3138 }
3139 VectorLoopValueMap.initScalar(Instr, Entry);
3140 }
3142 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3143 Value *End, Value *Step,
3144 Instruction *DL) {
3145 BasicBlock *Header = L->getHeader();
3146 BasicBlock *Latch = L->getLoopLatch();
3147 // As we're just creating this loop, it's possible no latch exists
3148 // yet. If so, use the header as this will be a single block loop.
3149 if (!Latch)
3150 Latch = Header;
3152 IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3153 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3154 setDebugLocFromInst(Builder, OldInst);
3155 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3157 Builder.SetInsertPoint(Latch->getTerminator());
3158 setDebugLocFromInst(Builder, OldInst);
3160 // Create i+1 and fill the PHINode.
3161 Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
3162 Induction->addIncoming(Start, L->getLoopPreheader());
3163 Induction->addIncoming(Next, Latch);
3164 // Create the compare.
3165 Value *ICmp = Builder.CreateICmpEQ(Next, End);
3166 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
3168 // Now we have two terminators. Remove the old one from the block.
3169 Latch->getTerminator()->eraseFromParent();
3171 return Induction;
3172 }
3174 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3175 if (TripCount)
3176 return TripCount;
3178 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3179 // Find the loop boundaries.
3180 ScalarEvolution *SE = PSE.getSE();
3181 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3182 assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
3183 "Invalid loop count");
3185 Type *IdxTy = Legal->getWidestInductionType();
3187 // The exit count might have type i64 while the phi is i32. This can
3188 // happen if we have an induction variable that is sign-extended before the
3189 // compare. The only way we can get a backedge-taken count in that case is if
3190 // the induction variable was signed, and as such it will not overflow. In
3191 // such a case, truncation is legal.
3192 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
3193 IdxTy->getPrimitiveSizeInBits())
3194 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3195 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3197 // Get the total trip count from the count by adding 1.
3198 const SCEV *ExitCount = SE->getAddExpr(
3199 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
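// Illustrative example (not in the original): for a loop
// "for (i = 0; i < n; ++i)" the backedge-taken count is n - 1, so the trip
// count expanded here is (n - 1) + 1 == n.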
3201 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3203 // Expand the trip count and place the new instructions in the preheader.
3204 // Notice that the pre-header does not change, only the loop body.
3205 SCEVExpander Exp(*SE, DL, "induction");
3207 // Count holds the overall loop count (N).
3208 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3209 L->getLoopPreheader()->getTerminator());
3211 if (TripCount->getType()->isPointerTy())
3212 TripCount =
3213 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3214 L->getLoopPreheader()->getTerminator());
3216 return TripCount;
3217 }
3219 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3220 if (VectorTripCount)
3221 return VectorTripCount;
3223 Value *TC = getOrCreateTripCount(L);
3224 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3226 // Now we need to generate the expression for the part of the loop that the
3227 // vectorized body will execute. This is equal to N - (N % Step) if scalar
3228 // iterations are not required for correctness, or N - Step, otherwise. Step
3229 // is equal to the vectorization factor (number of SIMD elements) times the
3230 // unroll factor (number of SIMD instructions).
3231 Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
3232 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3234 // If there is a non-reversed interleaved group that may speculatively access
3235 // memory out-of-bounds, we need to ensure that there will be at least one
3236 // iteration of the scalar epilogue loop. Thus, if the step evenly divides
3237 // the trip count, we set the remainder to be equal to the step. If the step
3238 // does not evenly divide the trip count, no adjustment is necessary since
3239 // there will already be scalar iterations. Note that the minimum iterations
3240 // check ensures that N >= Step.
3241 if (VF > 1 && Legal->requiresScalarEpilogue()) {
3242 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3243 R = Builder.CreateSelect(IsZero, Step, R);
3244 }
3246 VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
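// Worked example (not in the original): with a trip count of 17 and
// VF * UF = 8, the remainder is 1 and the vector trip count is 16. If a
// scalar epilogue is required and the trip count is 16, the remainder is
// bumped from 0 to 8, leaving a vector trip count of 8 so the epilogue
// always runs at least one iteration.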
3248 return VectorTripCount;
3249 }
3251 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3252 BasicBlock *Bypass) {
3253 Value *Count = getOrCreateTripCount(L);
3254 BasicBlock *BB = L->getLoopPreheader();
3255 IRBuilder<> Builder(BB->getTerminator());
3257 // Generate code to check that the loop's trip count that we computed by
3258 // adding one to the backedge-taken count will not overflow.
3259 Value *CheckMinIters = Builder.CreateICmpULT(
3260 Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
3262 BasicBlock *NewBB =
3263 BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked");
3264 // Update dominator tree immediately if the generated block is a
3265 // LoopBypassBlock because SCEV expansions to generate loop bypass
3266 // checks may query it before the current function is finished.
3267 DT->addNewBlock(NewBB, BB);
3268 if (L->getParentLoop())
3269 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3270 ReplaceInstWithInst(BB->getTerminator(),
3271 BranchInst::Create(Bypass, NewBB, CheckMinIters));
3272 LoopBypassBlocks.push_back(BB);
3273 }
3275 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
3276 BasicBlock *Bypass) {
3277 Value *TC = getOrCreateVectorTripCount(L);
3278 BasicBlock *BB = L->getLoopPreheader();
3279 IRBuilder<> Builder(BB->getTerminator());
3281 // Now, compare the new count to zero. If it is zero skip the vector loop and
3282 // jump to the scalar loop.
3283 Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
3284 "cmp.zero");
3286 // Split the preheader; this branch skips the vector loop entirely when the
3287 // vector trip count is zero.
3288 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3289 // Update dominator tree immediately if the generated block is a
3290 // LoopBypassBlock because SCEV expansions to generate loop bypass
3291 // checks may query it before the current function is finished.
3292 DT->addNewBlock(NewBB, BB);
3293 if (L->getParentLoop())
3294 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3295 ReplaceInstWithInst(BB->getTerminator(),
3296 BranchInst::Create(Bypass, NewBB, Cmp));
3297 LoopBypassBlocks.push_back(BB);
3298 }
3300 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3301 BasicBlock *BB = L->getLoopPreheader();
3303 // Generate the code to check the SCEV assumptions that we made.
3304 // We want the new basic block to start at the first instruction in a
3305 // sequence of instructions that form a check.
3306 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3307 "scev.check");
3308 Value *SCEVCheck =
3309 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
3311 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3312 if (C->isZero())
3313 return;
3315 // Create a new block containing the stride check.
3316 BB->setName("vector.scevcheck");
3317 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3318 // Update dominator tree immediately if the generated block is a
3319 // LoopBypassBlock because SCEV expansions to generate loop bypass
3320 // checks may query it before the current function is finished.
3321 DT->addNewBlock(NewBB, BB);
3322 if (L->getParentLoop())
3323 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3324 ReplaceInstWithInst(BB->getTerminator(),
3325 BranchInst::Create(Bypass, NewBB, SCEVCheck));
3326 LoopBypassBlocks.push_back(BB);
3327 AddedSafetyChecks = true;
3328 }
3330 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3331 BasicBlock *BB = L->getLoopPreheader();
3333 // Generate the code that checks in runtime if arrays overlap. We put the
3334 // checks into a separate block to make the more common case of few elements
3335 // faster.
3336 Instruction *FirstCheckInst;
3337 Instruction *MemRuntimeCheck;
3338 std::tie(FirstCheckInst, MemRuntimeCheck) =
3339 Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
3340 if (!MemRuntimeCheck)
3341 return;
3343 // Create a new block containing the memory check.
3344 BB->setName("vector.memcheck");
3345 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3346 // Update dominator tree immediately if the generated block is a
3347 // LoopBypassBlock because SCEV expansions to generate loop bypass
3348 // checks may query it before the current function is finished.
3349 DT->addNewBlock(NewBB, BB);
3350 if (L->getParentLoop())
3351 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3352 ReplaceInstWithInst(BB->getTerminator(),
3353 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
3354 LoopBypassBlocks.push_back(BB);
3355 AddedSafetyChecks = true;
3357 // We currently don't use LoopVersioning for the actual loop cloning but we
3358 // still use it to add the noalias metadata.
3359 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
3360 PSE.getSE());
3361 LVer->prepareNoAliasMetadata();
3362 }
3364 void InnerLoopVectorizer::createEmptyLoop() {
3365 /*
3366 In this function we generate a new loop. The new loop will contain
3367 the vectorized instructions while the old loop will continue to run the
3368 scalar remainder. The new loop flows through the following blocks, top
3369 to bottom (the connector arrows of the original ASCII diagram are elided):
3370 [ ] <-- loop iteration number check.
3373 | [ ] <-- vector loop bypass (may consist of multiple blocks).
3376 || [ ] <-- vector pre header.
3380 | [ ]_| <-- vector loop.
3383 | -[ ] <--- middle-block.
3386 -|- >[ ] <--- new preheader.
3390 | [ ]_| <-- old scalar loop to handle remainder.
3393 >[ ] <-- exit block.
3396 */
3397 BasicBlock *OldBasicBlock = OrigLoop->getHeader();
3398 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
3399 BasicBlock *ExitBlock = OrigLoop->getExitBlock();
3400 assert(VectorPH && "Invalid loop structure");
3401 assert(ExitBlock && "Must have an exit block");
3403 // Some loops have a single integer induction variable, while other loops
3404 // don't. One example is C++ iterators, which often have multiple pointer
3405 // induction variables. In the code below we also support a case where we
3406 // don't have a single induction variable.
3408 // We try to obtain an induction variable from the original loop as hard
3409 // as possible. However if we don't find one that:
3411 // - counts from zero, stepping by one
3412 // - is the size of the widest induction variable type
3413 // then we create a new one.
3414 OldInduction = Legal->getPrimaryInduction();
3415 Type *IdxTy = Legal->getWidestInductionType();
3417 // Split the single block loop into the two loop structure described above.
3418 BasicBlock *VecBody =
3419 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
3420 BasicBlock *MiddleBlock =
3421 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
3422 BasicBlock *ScalarPH =
3423 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
3425 // Create and register the new vector loop.
3426 Loop *Lp = new Loop();
3427 Loop *ParentLoop = OrigLoop->getParentLoop();
3429 // Insert the new loop into the loop nest and register the new basic blocks
3430 // before calling any utilities such as SCEV that require valid LoopInfo.
3431 if (ParentLoop) {
3432 ParentLoop->addChildLoop(Lp);
3433 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
3434 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
3435 } else {
3436 LI->addTopLevelLoop(Lp);
3437 }
3438 Lp->addBasicBlockToLoop(VecBody, *LI);
3440 // Find the loop boundaries.
3441 Value *Count = getOrCreateTripCount(Lp);
3443 Value *StartIdx = ConstantInt::get(IdxTy, 0);
3445 // We need to test whether the backedge-taken count is uint##_max. Adding one
3446 // to it will cause overflow and an incorrect loop trip count in the vector
3447 // body. In case of overflow we want to directly jump to the scalar remainder
3448 // loop.
3449 emitMinimumIterationCountCheck(Lp, ScalarPH);
3450 // Now, compare the new count to zero. If it is zero skip the vector loop and
3451 // jump to the scalar loop.
3452 emitVectorLoopEnteredCheck(Lp, ScalarPH);
3453 // Generate the code to check any assumptions that we've made for SCEV
3454 // expressions.
3455 emitSCEVChecks(Lp, ScalarPH);
3457 // Generate the code that checks in runtime if arrays overlap. We put the
3458 // checks into a separate block to make the more common case of few elements
3459 // faster.
3460 emitMemRuntimeChecks(Lp, ScalarPH);
3462 // Generate the induction variable.
3463 // The loop step is equal to the vectorization factor (num of SIMD elements)
3464 // times the unroll factor (num of SIMD instructions).
3465 Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3466 Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3467 Induction =
3468 createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3469 getDebugLocFromInstOrOperands(OldInduction));
3471 // We are going to resume the execution of the scalar loop.
3472 // Go over all of the induction variables that we found and fix the
3473 // PHIs that are left in the scalar version of the loop.
3474 // The starting values of PHI nodes depend on the counter of the last
3475 // iteration in the vectorized loop.
3476 // If we come from a bypass edge then we need to start from the original
3477 // start value.
3479 // This variable saves the new starting index for the scalar loop. It is used
3480 // to test if there are any tail iterations left once the vector loop has
3481 // completed.
3482 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
3483 for (auto &InductionEntry : *List) {
3484 PHINode *OrigPhi = InductionEntry.first;
3485 InductionDescriptor II = InductionEntry.second;
3487 // Create phi nodes to merge from the backedge-taken check block.
3488 PHINode *BCResumeVal = PHINode::Create(
3489 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
3490 Value *&EndValue = IVEndValues[OrigPhi];
3491 if (OrigPhi == OldInduction) {
3492 // We know what the end value is.
3493 EndValue = CountRoundDown;
3494 } else {
3495 IRBuilder<> B(LoopBypassBlocks.back()->getTerminator());
3496 Type *StepType = II.getStep()->getType();
3497 Instruction::CastOps CastOp =
3498 CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
3499 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
3500 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
3501 EndValue = II.transform(B, CRD, PSE.getSE(), DL);
3502 EndValue->setName("ind.end");
3503 }
3505 // The new PHI merges the original incoming value, in case of a bypass,
3506 // or the value at the end of the vectorized loop.
3507 BCResumeVal->addIncoming(EndValue, MiddleBlock);
3509 // Fix the scalar body counter (PHI node).
3510 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
3512 // The old induction's phi node in the scalar body needs the truncated
3513 // value.
3514 for (BasicBlock *BB : LoopBypassBlocks)
3515 BCResumeVal->addIncoming(II.getStartValue(), BB);
3516 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
3517 }
3519 // Add a check in the middle block to see if we have completed
3520 // all of the iterations in the first vector loop.
3521 // If (N - N%VF) == N, then we *don't* need to run the remainder.
3522 Value *CmpN =
3523 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3524 CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
3525 ReplaceInstWithInst(MiddleBlock->getTerminator(),
3526 BranchInst::Create(ExitBlock, ScalarPH, CmpN));
3528 // Get ready to start creating new instructions into the vectorized body.
3529 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
3531 // Save the state.
3532 LoopVectorPreHeader = Lp->getLoopPreheader();
3533 LoopScalarPreHeader = ScalarPH;
3534 LoopMiddleBlock = MiddleBlock;
3535 LoopExitBlock = ExitBlock;
3536 LoopVectorBody = VecBody;
3537 LoopScalarBody = OldBasicBlock;
3539 // Keep all loop hints from the original loop on the vector loop (we'll
3540 // replace the vectorizer-specific hints below).
3541 if (MDNode *LID = OrigLoop->getLoopID())
3542 Lp->setLoopID(LID);
3544 LoopVectorizeHints Hints(Lp, true, *ORE);
3545 Hints.setAlreadyVectorized();
3546 }
3548 // Fix up external users of the induction variable. At this point, we are
3549 // in LCSSA form, with all external PHIs that use the IV having one input value,
3550 // coming from the remainder loop. We need those PHIs to also have a correct
3551 // value for the IV when arriving directly from the middle block.
3552 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3553 const InductionDescriptor &II,
3554 Value *CountRoundDown, Value *EndValue,
3555 BasicBlock *MiddleBlock) {
3556 // There are two kinds of external IV usages - those that use the value
3557 // computed in the last iteration (the PHI) and those that use the penultimate
3558 // value (the value that feeds into the phi from the loop latch).
3559 // We allow both, but they, obviously, have different values.
3561 assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3563 DenseMap<Value *, Value *> MissingVals;
3565 // An external user of the last iteration's value should see the value that
3566 // the remainder loop uses to initialize its own IV.
3567 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3568 for (User *U : PostInc->users()) {
3569 Instruction *UI = cast<Instruction>(U);
3570 if (!OrigLoop->contains(UI)) {
3571 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3572 MissingVals[UI] = EndValue;
3576 // An external user of the penultimate value needs to see EndValue - Step.
3577 // The simplest way to get this is to recompute it from the constituent SCEVs,
3578 // that is Start + (Step * (CRD - 1)).
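// Worked example (not in the original): for an IV with start 0 and step 2,
// and a vector trip count CRD of 8, a user of the phi sees the penultimate
// value 0 + 2 * (8 - 1) = 14, while a user of the post-increment value sees
// 0 + 2 * 8 = 16.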
3579 for (User *U : OrigPhi->users()) {
3580 auto *UI = cast<Instruction>(U);
3581 if (!OrigLoop->contains(UI)) {
3582 const DataLayout &DL =
3583 OrigLoop->getHeader()->getModule()->getDataLayout();
3584 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3586 IRBuilder<> B(MiddleBlock->getTerminator());
3587 Value *CountMinusOne = B.CreateSub(
3588 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3589 Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(),
3590 "cast.cmo");
3591 Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
3592 Escape->setName("ind.escape");
3593 MissingVals[UI] = Escape;
3594 }
3595 }
3597 for (auto &I : MissingVals) {
3598 PHINode *PHI = cast<PHINode>(I.first);
3599 // One corner case we have to handle is two IVs "chasing" each-other,
3600 // that is %IV2 = phi [...], [ %IV1, %latch ]
3601 // In this case, if IV1 has an external use, we need to avoid adding both
3602 // "last value of IV1" and "penultimate value of IV2". So, verify that we
3603 // don't already have an incoming value for the middle block.
3604 if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3605 PHI->addIncoming(I.second, MiddleBlock);
3606 }
3607 }
3609 namespace {
3610 struct CSEDenseMapInfo {
3611 static bool canHandle(const Instruction *I) {
3612 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3613 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3614 }
3615 static inline Instruction *getEmptyKey() {
3616 return DenseMapInfo<Instruction *>::getEmptyKey();
3617 }
3618 static inline Instruction *getTombstoneKey() {
3619 return DenseMapInfo<Instruction *>::getTombstoneKey();
3620 }
3621 static unsigned getHashValue(const Instruction *I) {
3622 assert(canHandle(I) && "Unknown instruction!");
3623 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3624 I->value_op_end()));
3625 }
3626 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3627 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3628 LHS == getTombstoneKey() || RHS == getTombstoneKey())
3629 return LHS == RHS;
3630 return LHS->isIdenticalTo(RHS);
3631 }
3632 };
3633 } // end anonymous namespace
3635 /// \brief Perform CSE of induction variable instructions.
3636 static void cse(BasicBlock *BB) {
3637 // Perform simple cse.
3638 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3639 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3640 Instruction *In = &*I++;
3642 if (!CSEDenseMapInfo::canHandle(In))
3643 continue;
3645 // Check if we can replace this instruction with any of the
3646 // visited instructions.
3647 if (Instruction *V = CSEMap.lookup(In)) {
3648 In->replaceAllUsesWith(V);
3649 In->eraseFromParent();
3650 continue;
3651 }
3653 CSEMap[In] = In;
3654 }
3655 }
3657 /// \brief Estimate the overhead of scalarizing an instruction. This is a
3658 /// convenience wrapper for the type-based getScalarizationOverhead API.
3659 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
3660 const TargetTransformInfo &TTI) {
3661 if (VF == 1)
3662 return 0;
3664 unsigned Cost = 0;
3665 Type *RetTy = ToVectorTy(I->getType(), VF);
3666 if (!RetTy->isVoidTy() &&
3667 (!isa<LoadInst>(I) ||
3668 !TTI.supportsEfficientVectorElementLoadStore()))
3669 Cost += TTI.getScalarizationOverhead(RetTy, true, false);
3671 if (CallInst *CI = dyn_cast<CallInst>(I)) {
3672 SmallVector<const Value *, 4> Operands(CI->arg_operands());
3673 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3674 }
3675 else if (!isa<StoreInst>(I) ||
3676 !TTI.supportsEfficientVectorElementLoadStore()) {
3677 SmallVector<const Value *, 4> Operands(I->operand_values());
3678 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3679 }
3681 return Cost;
3682 }
3684 // Estimate cost of a call instruction CI if it were vectorized with factor VF.
3685 // Return the cost of the instruction, including scalarization overhead if it's
3686 // needed. The flag NeedToScalarize shows if the call needs to be scalarized -
3687 // i.e. either vector version isn't available, or is too expensive.
3688 static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
3689 const TargetTransformInfo &TTI,
3690 const TargetLibraryInfo *TLI,
3691 bool &NeedToScalarize) {
3692 Function *F = CI->getCalledFunction();
3693 StringRef FnName = CI->getCalledFunction()->getName();
3694 Type *ScalarRetTy = CI->getType();
3695 SmallVector<Type *, 4> Tys, ScalarTys;
3696 for (auto &ArgOp : CI->arg_operands())
3697 ScalarTys.push_back(ArgOp->getType());
3699 // Estimate cost of scalarized vector call. The source operands are assumed
3700 // to be vectors, so we need to extract individual elements from there,
3701 // execute VF scalar calls, and then gather the result into the vector return
3703 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
3704 if (VF == 1)
3705 return ScalarCallCost;
3707 // Compute corresponding vector type for return value and arguments.
3708 Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3709 for (Type *ScalarTy : ScalarTys)
3710 Tys.push_back(ToVectorTy(ScalarTy, VF));
3712 // Compute costs of unpacking argument values for the scalar calls and
3713 // packing the return values to a vector.
3714 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI);
3716 unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
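// Worked example (not in the original): with VF = 4, a scalar call cost of
// 10, and a scalarization overhead of 6, the scalarized cost is
// 10 * 4 + 6 = 46; a vectorized library call costing, say, 20 would then be
// chosen below and NeedToScalarize reset to false.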
3718 // If we can't emit a vector call for this function, then the currently found
3719 // cost is the cost we need to return.
3720 NeedToScalarize = true;
3721 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
3722 return Cost;
3724 // If the corresponding vector cost is cheaper, return its cost.
3725 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
3726 if (VectorCallCost < Cost) {
3727 NeedToScalarize = false;
3728 return VectorCallCost;
3729 }
3731 return Cost;
3732 }
3733 // Estimate cost of an intrinsic call instruction CI if it were vectorized with
3734 // factor VF. Return the cost of the instruction, including scalarization
3735 // overhead if it's needed.
3736 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
3737 const TargetTransformInfo &TTI,
3738 const TargetLibraryInfo *TLI) {
3739 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3740 assert(ID && "Expected intrinsic call!");
3742 FastMathFlags FMF;
3743 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3744 FMF = FPMO->getFastMathFlags();
3746 SmallVector<Value *, 4> Operands(CI->arg_operands());
3747 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF);
3748 }
3750 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3751 auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3752 auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3753 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3754 }
3755 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3756 auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3757 auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3758 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3759 }
3761 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3762 // For every instruction `I` in MinBWs, truncate the operands, create a
3763 // truncated version of `I` and reextend its result. InstCombine runs
3764 // later and will remove any ext/trunc pairs.
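// Illustrative example (not in the original): if the cost model proved an
// i32 add only ever needs 8 bits, its operands are shrunk to <VF x i8>, the
// add is recreated on <VF x i8>, and the result is zero-extended back to
// <VF x i32>; InstCombine later removes the redundant ext/trunc pairs.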
3766 SmallPtrSet<Value *, 4> Erased;
3767 for (const auto &KV : Cost->getMinimalBitwidths()) {
3768 // If the value wasn't vectorized, we must maintain the original scalar
3769 // type. The absence of the value from VectorLoopValueMap indicates that it
3770 // wasn't vectorized.
3771 if (!VectorLoopValueMap.hasVector(KV.first))
3773 VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
3774 for (Value *&I : Parts) {
3775 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3776 continue;
3777 Type *OriginalTy = I->getType();
3778 Type *ScalarTruncatedTy =
3779 IntegerType::get(OriginalTy->getContext(), KV.second);
3780 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
3781 OriginalTy->getVectorNumElements());
3782 if (TruncatedTy == OriginalTy)
3783 continue;
3785 IRBuilder<> B(cast<Instruction>(I));
3786 auto ShrinkOperand = [&](Value *V) -> Value * {
3787 if (auto *ZI = dyn_cast<ZExtInst>(V))
3788 if (ZI->getSrcTy() == TruncatedTy)
3789 return ZI->getOperand(0);
3790 return B.CreateZExtOrTrunc(V, TruncatedTy);
3791 };
3793 // The actual instruction modification depends on the instruction type,
3794 // not the operand type.
3795 Value *NewI = nullptr;
3796 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3797 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3798 ShrinkOperand(BO->getOperand(1)));
3799 cast<BinaryOperator>(NewI)->copyIRFlags(I);
3800 } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3801 NewI =
3802 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3803 ShrinkOperand(CI->getOperand(1)));
3804 } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3805 NewI = B.CreateSelect(SI->getCondition(),
3806 ShrinkOperand(SI->getTrueValue()),
3807 ShrinkOperand(SI->getFalseValue()));
3808 } else if (auto *CI = dyn_cast<CastInst>(I)) {
3809 switch (CI->getOpcode()) {
3811 llvm_unreachable("Unhandled cast!");
3812 case Instruction::Trunc:
3813 NewI = ShrinkOperand(CI->getOperand(0));
3814 break;
3815 case Instruction::SExt:
3816 NewI = B.CreateSExtOrTrunc(
3817 CI->getOperand(0),
3818 smallestIntegerVectorType(OriginalTy, TruncatedTy));
3819 break;
3820 case Instruction::ZExt:
3821 NewI = B.CreateZExtOrTrunc(
3822 CI->getOperand(0),
3823 smallestIntegerVectorType(OriginalTy, TruncatedTy));
3824 break;
3825 }
3826 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3827 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
3828 auto *O0 = B.CreateZExtOrTrunc(
3829 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3830 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
3831 auto *O1 = B.CreateZExtOrTrunc(
3832 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3834 NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
3835 } else if (isa<LoadInst>(I)) {
3836 // Don't do anything with the operands, just extend the result.
3837 continue;
3838 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3839 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3840 auto *O0 = B.CreateZExtOrTrunc(
3841 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3842 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3843 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3844 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3845 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3846 auto *O0 = B.CreateZExtOrTrunc(
3847 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3848 NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3850 llvm_unreachable("Unhandled instruction type!");
3853 // Lastly, extend the result.
3854 NewI->takeName(cast<Instruction>(I));
3855 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3856 I->replaceAllUsesWith(Res);
3857 cast<Instruction>(I)->eraseFromParent();
3858 Erased.insert(I);
3859 I = Res;
3860 }
3861 }
3863 // We'll have created a bunch of ZExts that are now parentless. Clean up.
3864 for (const auto &KV : Cost->getMinimalBitwidths()) {
3865 // If the value wasn't vectorized, we must maintain the original scalar
3866 // type. The absence of the value from VectorLoopValueMap indicates that it
3867 // wasn't vectorized.
3868 if (!VectorLoopValueMap.hasVector(KV.first))
3870 VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
3871 for (Value *&I : Parts) {
3872 ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3873 if (Inst && Inst->use_empty()) {
3874 Value *NewI = Inst->getOperand(0);
3875 Inst->eraseFromParent();
3876 I = NewI;
3877 }
3878 }
3879 }
3880 }
3882 void InnerLoopVectorizer::vectorizeLoop() {
3883 //===------------------------------------------------===//
3885 // Notice: any optimization or new instruction that goes
3886 // into the code below should also be implemented in
3887 // the cost-model.
3889 //===------------------------------------------------===//
3891 // Collect instructions from the original loop that will become trivially dead
3892 // in the vectorized loop. We don't need to vectorize these instructions. For
3893 // example, original induction update instructions can become dead because we
3894 // separately emit induction "steps" when generating code for the new loop.
3895 // Similarly, we create a new latch condition when setting up the structure
3896 // of the new loop, so the old one can become dead.
3897 SmallPtrSet<Instruction *, 4> DeadInstructions;
3898 collectTriviallyDeadInstructions(DeadInstructions);
3900 // Scan the loop in a topological order to ensure that defs are vectorized
3901 // before users.
3902 LoopBlocksDFS DFS(OrigLoop);
3903 DFS.perform(LI);
3905 // Vectorize all instructions in the original loop that will not become
3906 // trivially dead when vectorized.
3907 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
3908 for (Instruction &I : *BB)
3909 if (!DeadInstructions.count(&I))
3910 vectorizeInstruction(I);
3912 // Insert truncates and extends for any truncated instructions as hints to
3913 // InstCombine.
3914 if (VF > 1)
3915 truncateToMinimalBitwidths();
3917 // At this point every instruction in the original loop is widened to a
3918 // vector form. Now we need to fix the recurrences in the loop. These PHI
3919 // nodes are currently empty because we did not want to introduce cycles.
3920 // This is the second stage of vectorizing recurrences.
3921 fixCrossIterationPHIs();
3923 // Update the dominator tree.
3925 // FIXME: After creating the structure of the new loop, the dominator tree is
3926 // no longer up-to-date, and it remains that way until we update it
3927 // here. An out-of-date dominator tree is problematic for SCEV,
3928 // because SCEVExpander uses it to guide code generation. The
3929 // vectorizer uses SCEVExpander in several places. Instead, we should
3930 // keep the dominator tree up-to-date as we go.
3931 updateAnalysis();
3933 // Fix-up external users of the induction variables.
3934 for (auto &Entry : *Legal->getInductionVars())
3935 fixupIVUsers(Entry.first, Entry.second,
3936 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3937 IVEndValues[Entry.first], LoopMiddleBlock);
3939 fixLCSSAPHIs();
3940 predicateInstructions();
3942 // Remove redundant induction instructions.
3943 cse(LoopVectorBody);
3944 }
3946 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3947 // In order to support recurrences we need to be able to vectorize Phi nodes.
3948 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3949 // stage #2: We now need to fix the recurrences by adding incoming edges to
3950 // the currently empty PHI nodes. At this point every instruction in the
3951 // original loop is widened to a vector form so we can use them to construct
3952 // the incoming edges.
3953 for (Instruction &I : *OrigLoop->getHeader()) {
3954 PHINode *Phi = dyn_cast<PHINode>(&I);
3955 if (!Phi)
3956 break;
3957 // Handle first-order recurrences and reductions that need to be fixed.
3958 if (Legal->isFirstOrderRecurrence(Phi))
3959 fixFirstOrderRecurrence(Phi);
3960 else if (Legal->isReductionVariable(Phi))
3961 fixReduction(Phi);
3962 }
3963 }
3965 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3967 // This is the second phase of vectorizing first-order recurrences. An
3968 // overview of the transformation is described below. Suppose we have the
3971 // for (int i = 0; i < n; ++i)
3972 // b[i] = a[i] - a[i - 1];
3974 // There is a first-order recurrence on "a". For this loop, the shorthand
3975 // scalar IR looks like:
3977 // scalar.ph:
3978 // s_init = a[-1]
3979 // br scalar.body
3981 // scalar.body:
3982 // i = phi [0, scalar.ph], [i+1, scalar.body]
3983 // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3984 // s2 = a[i]
3985 // b[i] = s2 - s1
3986 // br cond, scalar.body, ...
3988 // In this example, s1 is a recurrence because its value depends on the
3989 // previous iteration. In the first phase of vectorization, we created a
3990 // temporary value for s1. We now complete the vectorization and produce the
3991 // shorthand vector IR shown below (for VF = 4, UF = 1).
3993 // vector.ph:
3994 // v_init = vector(..., ..., ..., a[-1])
3995 // br vector.body
3997 // vector.body:
3998 // i = phi [0, vector.ph], [i+4, vector.body]
3999 // v1 = phi [v_init, vector.ph], [v2, vector.body]
4000 // v2 = a[i, i+1, i+2, i+3];
4001 // v3 = vector(v1(3), v2(0, 1, 2))
4002 // b[i, i+1, i+2, i+3] = v2 - v3
4003 // br cond, vector.body, middle.block
4005 // middle.block:
4006 // x = v2(3)
4007 // br scalar.ph
4009 // scalar.ph:
4010 // s_init = phi [x, middle.block], [a[-1], otherwise]
4011 // br scalar.body
4013 // After execution completes the vector loop, we extract the next value of
4014 // the recurrence (x) to use as the initial value in the scalar loop.
4016 // Get the original loop preheader and single loop latch.
4017 auto *Preheader = OrigLoop->getLoopPreheader();
4018 auto *Latch = OrigLoop->getLoopLatch();
4020 // Get the initial and previous values of the scalar recurrence.
4021 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4022 auto *Previous = Phi->getIncomingValueForBlock(Latch);
4024 // Create a vector from the initial value.
4025 auto *VectorInit = ScalarInit;
4026 if (VF > 1) {
4027 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4028 VectorInit = Builder.CreateInsertElement(
4029 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4030 Builder.getInt32(VF - 1), "vector.recur.init");
4033 // We constructed a temporary phi node in the first phase of vectorization.
4034 // This phi node will eventually be deleted.
4035 VectorParts &PhiParts = VectorLoopValueMap.getVector(Phi);
4036 Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));
4038 // Create a phi node for the new recurrence. The current value will either be
4039 // the initial value inserted into a vector or loop-varying vector value.
4040 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4041 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4043 // Get the vectorized previous value.
4044 auto &PreviousParts = getVectorValue(Previous);
4046 // Set the insertion point after the previous value if it is an instruction.
4047 // Note that the previous value may have been constant-folded so it is not
4048 // guaranteed to be an instruction in the vector loop.
4049 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousParts[UF - 1]))
4050 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
4052 Builder.SetInsertPoint(
4053 &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));
4055 // We will construct a vector for the recurrence by combining the values for
4056 // the current and previous iterations. This is the required shuffle mask.
4057 SmallVector<Constant *, 8> ShuffleMask(VF);
4058 ShuffleMask[0] = Builder.getInt32(VF - 1);
4059 for (unsigned I = 1; I < VF; ++I)
4060 ShuffleMask[I] = Builder.getInt32(I + VF - 1);
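// Worked example (not in the original): for VF = 4 the mask is <3, 4, 5, 6>,
// selecting the last lane of the previous iteration's vector followed by
// lanes 0-2 of the current one, i.e. the value "one iteration behind" in
// each lane.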
4062 // The vector from which to take the initial value for the current iteration
4063 // (actual or unrolled). Initially, this is the vector phi node.
4064 Value *Incoming = VecPhi;
4066 // Shuffle the current and previous vector and update the vector parts.
4067 for (unsigned Part = 0; Part < UF; ++Part) {
4068 Value *Shuffle =
4069 VF > 1
4070 ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
4071 ConstantVector::get(ShuffleMask))
4072 : Incoming;
4073 PhiParts[Part]->replaceAllUsesWith(Shuffle);
4074 cast<Instruction>(PhiParts[Part])->eraseFromParent();
4075 PhiParts[Part] = Shuffle;
4076 Incoming = PreviousParts[Part];
4079 // Fix the latch value of the new recurrence in the vector loop.
4080 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4082 // Extract the last vector element in the middle block. This will be the
4083 // initial value for the recurrence when jumping to the scalar loop.
4084 auto *ExtractForScalar = Incoming;
4085 if (VF > 1) {
4086 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4087 ExtractForScalar = Builder.CreateExtractElement(
4088 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
4089 }
4090 // Extract the second last element in the middle block if the
4091 // Phi is used outside the loop. We need to extract the phi itself
4092 // and not the last element (the phi update in the current iteration). This
4093 // will be the value when jumping to the exit block from the LoopMiddleBlock,
4094 // when the scalar loop is not run at all.
4095 Value *ExtractForPhiUsedOutsideLoop = nullptr;
4096 if (VF > 1)
4097 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4098 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
4099 // When loop is unrolled without vectorizing, initialize
4100 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of
4101 // `Incoming`. This is analogous to the vectorized case above: extracting the
4102 // second last element when VF > 1.
4103 else if (UF > 1)
4104 ExtractForPhiUsedOutsideLoop = PreviousParts[UF - 2];
4106 // Fix the initial value of the original recurrence in the scalar loop.
4107 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4108 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4109 for (auto *BB : predecessors(LoopScalarPreHeader)) {
4110 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4111 Start->addIncoming(Incoming, BB);
4112 }
4114 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
4115 Phi->setName("scalar.recur");
4117 // Finally, fix users of the recurrence outside the loop. The users will need
4118 // either the last value of the scalar recurrence or the last value of the
4119 // vector recurrence we extracted in the middle block. Since the loop is in
4120 // LCSSA form, we just need to find the phi node for the original scalar
4121 // recurrence in the exit block, and then add an edge for the middle block.
4122 for (auto &I : *LoopExitBlock) {
4123 auto *LCSSAPhi = dyn_cast<PHINode>(&I);
4124 if (!LCSSAPhi)
4125 break;
4126 if (LCSSAPhi->getIncomingValue(0) == Phi) {
4127 LCSSAPhi->addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4128 break;
4129 }
4130 }
4131 }
4133 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
4134 Constant *Zero = Builder.getInt32(0);
4136 // Get its reduction variable descriptor.
4137 assert(Legal->isReductionVariable(Phi) &&
4138 "Unable to find the reduction variable");
4139 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
4141 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
4142 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4143 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4144 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
4145 RdxDesc.getMinMaxRecurrenceKind();
4146 setDebugLocFromInst(Builder, ReductionStartValue);
4148 // We need to generate a reduction vector from the incoming scalar.
4149 // To do so, we need to generate the 'identity' vector and override
4150 // one of the elements with the incoming scalar reduction. We need
4151 // to do it in the vector-loop preheader.
4152 Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());
4154 // This is the vector-clone of the value that leaves the loop.
4155 const VectorParts &VectorExit = getVectorValue(LoopExitInst);
4156 Type *VecTy = VectorExit[0]->getType();
4158 // Find the reduction identity variable: zero for addition, or, and xor;
4159 // one for multiplication; -1 (all ones) for and.
4160 Value *Identity;
4161 Value *VectorStart;
4162 if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
4163 RK == RecurrenceDescriptor::RK_FloatMinMax) {
4164 // MinMax reductions have the start value as their identity.
4165 if (VF == 1)
4166 VectorStart = Identity = ReductionStartValue;
4167 else
4168 VectorStart = Identity =
4169 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
4171 } else {
4172 // Handle other reduction kinds:
4173 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
4174 RK, VecTy->getScalarType());
4175 if (VF == 1) {
4176 Identity = Iden;
4177 // This vector is the Identity vector where the first element is the
4178 // incoming scalar reduction.
4179 VectorStart = ReductionStartValue;
4180 } else {
4181 Identity = ConstantVector::getSplat(VF, Iden);
4183 // This vector is the Identity vector where the first element is the
4184 // incoming scalar reduction.
4185 VectorStart =
4186 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
4187 }
4188 }
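// Illustrative example (not in the original): for an integer add reduction
// with start value 5 and VF = 4, Identity is <0, 0, 0, 0> and VectorStart is
// <5, 0, 0, 0>, so the start value is folded in exactly once.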
4190 // Fix the vector-loop phi.
4192 // Reductions do not have to start at zero. They can start with
4193 // any loop invariant values.
4194 const VectorParts &VecRdxPhi = getVectorValue(Phi);
4195 BasicBlock *Latch = OrigLoop->getLoopLatch();
4196 Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4197 const VectorParts &Val = getVectorValue(LoopVal);
4198 for (unsigned part = 0; part < UF; ++part) {
4199 // Make sure to add the reduction start value only to the
4200 // first unroll part.
4201 Value *StartVal = (part == 0) ? VectorStart : Identity;
4202 cast<PHINode>(VecRdxPhi[part])
4203 ->addIncoming(StartVal, LoopVectorPreHeader);
4204 cast<PHINode>(VecRdxPhi[part])
4205 ->addIncoming(Val[part], LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4206 }
4208 // Before each round, move the insertion point right between
4209 // the PHIs and the values we are going to write.
4210 // This allows us to write both PHINodes and the extractelement
4211 // instructions.
4212 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4214 VectorParts &RdxParts = VectorLoopValueMap.getVector(LoopExitInst);
4215 setDebugLocFromInst(Builder, LoopExitInst);
4217 // If the vector reduction can be performed in a smaller type, we truncate
4218 // then extend the loop exit value to enable InstCombine to evaluate the
4219 // entire expression in the smaller type.
4220 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
4221 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4222 Builder.SetInsertPoint(LoopVectorBody->getTerminator());
4223 for (unsigned part = 0; part < UF; ++part) {
4224 Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
4225 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4226 : Builder.CreateZExt(Trunc, VecTy);
4227 for (Value::user_iterator UI = RdxParts[part]->user_begin();
4228 UI != RdxParts[part]->user_end();)
4229 if (*UI != Trunc) {
4230 (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
4231 RdxParts[part] = Extnd;
4232 } else {
4233 ++UI;
4234 }
4235 }
4236 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4237 for (unsigned part = 0; part < UF; ++part)
4238 RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
4239 }
4241 // Reduce all of the unrolled parts into a single vector.
4242 Value *ReducedPartRdx = RdxParts[0];
4243 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
4244 setDebugLocFromInst(Builder, ReducedPartRdx);
4245 for (unsigned part = 1; part < UF; ++part) {
4246 if (Op != Instruction::ICmp && Op != Instruction::FCmp)
4247 // Floating point operations had to be 'fast' to enable the reduction.
4248 ReducedPartRdx = addFastMathFlag(
4249 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part],
4250 ReducedPartRdx, "bin.rdx"));
4251 else
4252 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
4253 Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]);
4254 }
4256 if (VF > 1) {
4257 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles
4258 // and vector ops, reducing the set of values being computed by half each
4259 // round.
4260 assert(isPowerOf2_32(VF) &&
4261 "Reduction emission only supported for pow2 vectors!");
4262 Value *TmpVec = ReducedPartRdx;
4263 SmallVector<Constant *, 32> ShuffleMask(VF, nullptr);
4264 for (unsigned i = VF; i != 1; i >>= 1) {
4265 // Move the upper half of the vector to the lower half.
4266 for (unsigned j = 0; j != i / 2; ++j)
4267 ShuffleMask[j] = Builder.getInt32(i / 2 + j);
4269 // Fill the rest of the mask with undef.
4270 std::fill(&ShuffleMask[i / 2], ShuffleMask.end(),
4271 UndefValue::get(Builder.getInt32Ty()));
4273 Value *Shuf = Builder.CreateShuffleVector(
4274 TmpVec, UndefValue::get(TmpVec->getType()),
4275 ConstantVector::get(ShuffleMask), "rdx.shuf");
4277 if (Op != Instruction::ICmp && Op != Instruction::FCmp)
4278 // Floating point operations had to be 'fast' to enable the reduction.
4279 TmpVec = addFastMathFlag(Builder.CreateBinOp(
4280 (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx"));
4281 else
4282 TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind,
4283 TmpVec, Shuf);
4284 }
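// Worked example (not in the original): for VF = 4, round one combines the
// vector with its shuffle by mask <2, 3, undef, undef>, round two by
// <1, undef, undef, undef>; after log2(4) = 2 rounds lane 0 holds the
// reduced value.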
4286 // The result is in the first element of the vector.
4287 ReducedPartRdx =
4288 Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
4290 // If the reduction can be performed in a smaller type, we need to extend
4291 // the reduction to the wider type before we branch to the original loop.
4292 if (Phi->getType() != RdxDesc.getRecurrenceType())
4293 ReducedPartRdx =
4294 RdxDesc.isSigned()
4295 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
4296 : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
4297 }
4299 // Create a phi node that merges control-flow from the backedge-taken check
4300 // block and the middle block.
4301 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
4302 LoopScalarPreHeader->getTerminator());
4303 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4304 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4305 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4307 // Now, we need to fix the users of the reduction variable
4308 // inside and outside of the scalar remainder loop.
4309 // We know that the loop is in LCSSA form. We need to update the
4310 // PHI nodes in the exit blocks.
4311 for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
4312 LEE = LoopExitBlock->end();
4313 LEI != LEE; ++LEI) {
4314 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
4315 if (!LCSSAPhi)
4316 break;
4318 // All PHINodes need to have a single entry edge, or two if
4319 // we already fixed them.
4320 assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
4322 // We found a reduction value exit-PHI. Update it with the
4323 // incoming bypass edge.
4324 if (LCSSAPhi->getIncomingValue(0) == LoopExitInst)
4325 LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4326 } // end of the LCSSA phi scan.
4328 // Fix the scalar loop reduction variable with the incoming reduction sum
4329 // from the vector body and from the backedge value.
4330 int IncomingEdgeBlockIdx =
4331 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4332 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4333 // Pick the other block.
4334 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4335 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4336 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4337 }
4339 void InnerLoopVectorizer::fixLCSSAPHIs() {
4340 for (Instruction &LEI : *LoopExitBlock) {
4341 auto *LCSSAPhi = dyn_cast<PHINode>(&LEI);
4342 if (!LCSSAPhi)
4343 break;
4344 if (LCSSAPhi->getNumIncomingValues() == 1)
4345 LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()),
4346 LoopMiddleBlock);
4347 }
4348 }
4350 void InnerLoopVectorizer::collectTriviallyDeadInstructions(
4351 SmallPtrSetImpl<Instruction *> &DeadInstructions) {
4352 BasicBlock *Latch = OrigLoop->getLoopLatch();
4354 // We create new control-flow for the vectorized loop, so the original
4355 // condition will be dead after vectorization if it's only used by the
4356 // branch.
4357 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4358 if (Cmp && Cmp->hasOneUse())
4359 DeadInstructions.insert(Cmp);
4361 // We create new "steps" for induction variable updates to which the original
4362 // induction variables map. An original update instruction will be dead if
4363 // all its users except the induction variable are dead.
4364 for (auto &Induction : *Legal->getInductionVars()) {
4365 PHINode *Ind = Induction.first;
4366 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4367 if (all_of(IndUpdate->users(), [&](User *U) -> bool {
4368 return U == Ind || DeadInstructions.count(cast<Instruction>(U));
4369 }))
4370 DeadInstructions.insert(IndUpdate);
4371 }
4372 }
4374 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4376 // The basic block and loop containing the predicated instruction.
4377 auto *PredBB = PredInst->getParent();
4378 auto *VectorLoop = LI->getLoopFor(PredBB);
4380 // Initialize a worklist with the operands of the predicated instruction.
4381 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4383 // Holds instructions that we need to analyze again. An instruction may be
4384 // reanalyzed if we don't yet know if we can sink it or not.
4385 SmallVector<Instruction *, 8> InstsToReanalyze;
4387 // Returns true if a given use occurs in the predicated block. Phi nodes use
4388 // their operands in their corresponding predecessor blocks.
4389 auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4390 auto *I = cast<Instruction>(U.getUser());
4391 BasicBlock *BB = I->getParent();
4392 if (auto *Phi = dyn_cast<PHINode>(I))
4393 BB = Phi->getIncomingBlock(
4394 PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4395 return BB == PredBB;
4396 };
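// For illustration (names hypothetical): given 'phi [ %v, %pred.bb ], ...',
// the use of %v is attributed to the predecessor %pred.bb rather than to the
// phi's own block, so a definition of %v may still sink into %pred.bb.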
4398 // Iteratively sink the scalarized operands of the predicated instruction
4399 // into the block we created for it. When an instruction is sunk, its
4400 // operands are then added to the worklist. The algorithm ends after one pass
4401 // through the worklist doesn't sink a single instruction.
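// For illustration (IR names hypothetical): if the predicated block holds
// '%d = sdiv i32 %a, %b' and '%a = extractelement <2 x i32> %v, i32 0' has
// no other users, the extractelement is sunk next to the sdiv on one pass,
// and its own operands are reconsidered on the next.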
4403 bool Changed;
4404 do {
4405 // Add the instructions that need to be reanalyzed to the worklist, and
4406 // reset the changed indicator.
4407 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4408 InstsToReanalyze.clear();
4409 Changed = false;
4411 while (!Worklist.empty()) {
4412 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4414 // We can't sink an instruction if it is a phi node, is already in the
4415 // predicated block, is not in the loop, or may have side effects.
4416 if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4417 !VectorLoop->contains(I) || I->mayHaveSideEffects())
4418 continue;
4420 // It's legal to sink the instruction if all its uses occur in the
4421 // predicated block. Otherwise, there's nothing to do yet, and we may
4422 // need to reanalyze the instruction.
4423 if (!all_of(I->uses(), isBlockOfUsePredicated)) {
4424 InstsToReanalyze.push_back(I);
4425 continue;
4426 }
4428 // Move the instruction to the beginning of the predicated block, and add
4429 // its operands to the worklist.
4430 I->moveBefore(&*PredBB->getFirstInsertionPt());
4431 Worklist.insert(I->op_begin(), I->op_end());
4433 // The sinking may have enabled other instructions to be sunk, so we will
4434 // have to iterate.
4435 Changed = true;
4436 }
4437 } while (Changed);
4438 }
4440 void InnerLoopVectorizer::predicateInstructions() {
4442 // For each instruction I marked for predication on value C, split I into its
4443 // own basic block to form an if-then construct over C. Since I may be fed by
4444 // an extractelement instruction or other scalar operand, we try to
4445 // iteratively sink its scalar operands into the predicated block. If I feeds
4446 // an insertelement instruction, we try to move this instruction into the
4447 // predicated block as well. For non-void types, a phi node will be created
4448 // for the resulting value (either vector or scalar).
4450 // So for some predicated instruction, e.g. the conditional sdiv in:
4451 //
4452 // for.body:
4453 //  ...
4454 //  %add = add nsw i32 %mul, %0
4455 //  %cmp5 = icmp sgt i32 %2, 7
4456 //  br i1 %cmp5, label %if.then, label %if.end
4457 //
4458 // if.then:
4459 //  %div = sdiv i32 %0, %1
4460 //  br label %if.end
4461 //
4462 // if.end:
4463 //  %x.0 = phi i32 [ %div, %if.then ], [ %add, %for.body ]
4464 //
4465 // the sdiv at this point is scalarized and if-converted using a select.
4466 // The inactive elements in the vector are not used, but the predicated
4467 // instruction is still executed for all vector elements, essentially:
4468 //
4469 // vector.body:
4470 //   ...
4471 // %17 = add nsw <2 x i32> %16, %wide.load
4472 // %29 = extractelement <2 x i32> %wide.load, i32 0
4473 // %30 = extractelement <2 x i32> %wide.load51, i32 0
4474 // %31 = sdiv i32 %29, %30
4475 // %32 = insertelement <2 x i32> undef, i32 %31, i32 0
4476 // %35 = extractelement <2 x i32> %wide.load, i32 1
4477 // %36 = extractelement <2 x i32> %wide.load51, i32 1
4478 // %37 = sdiv i32 %35, %36
4479 // %38 = insertelement <2 x i32> %32, i32 %37, i32 1
4480 // %predphi = select <2 x i1> %26, <2 x i32> %38, <2 x i32> %17
4482 // Predication will now re-introduce the original control flow to avoid false
4483 // side-effects by the sdiv instructions on the inactive elements, yielding
4484 // the following:
4485 //
4486 // vector.body:
4487 //   ...
4488 //   %5 = add nsw <2 x i32> %4, %wide.load
4489 // %8 = icmp sgt <2 x i32> %wide.load52, <i32 7, i32 7>
4490 // %9 = extractelement <2 x i1> %8, i32 0
4491 // br i1 %9, label %pred.sdiv.if, label %pred.sdiv.continue
4492 //
4493 // pred.sdiv.if:
4494 // %10 = extractelement <2 x i32> %wide.load, i32 0
4495 // %11 = extractelement <2 x i32> %wide.load51, i32 0
4496 // %12 = sdiv i32 %10, %11
4497 // %13 = insertelement <2 x i32> undef, i32 %12, i32 0
4498 // br label %pred.sdiv.continue
4500 // pred.sdiv.continue:
4501 // %14 = phi <2 x i32> [ undef, %vector.body ], [ %13, %pred.sdiv.if ]
4502 // %15 = extractelement <2 x i1> %8, i32 1
4503 // br i1 %15, label %pred.sdiv.if54, label %pred.sdiv.continue55
4504 //
4505 // pred.sdiv.if54:
4506 // %16 = extractelement <2 x i32> %wide.load, i32 1
4507 // %17 = extractelement <2 x i32> %wide.load51, i32 1
4508 // %18 = sdiv i32 %16, %17
4509 // %19 = insertelement <2 x i32> %14, i32 %18, i32 1
4510 // br label %pred.sdiv.continue55
4512 // pred.sdiv.continue55:
4513 // %20 = phi <2 x i32> [ %14, %pred.sdiv.continue ], [ %19, %pred.sdiv.if54 ]
4514 // %predphi = select <2 x i1> %8, <2 x i32> %20, <2 x i32> %5
4516 for (auto KV : PredicatedInstructions) {
4517 BasicBlock::iterator I(KV.first);
4518 BasicBlock *Head = I->getParent();
4519 auto *BB = SplitBlock(Head, &*std::next(I), DT, LI);
4520 auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
4521 /*BranchWeights=*/nullptr, DT, LI);
4522 I->moveBefore(T);
4523 sinkScalarOperands(&*I);
4525 I->getParent()->setName(Twine("pred.") + I->getOpcodeName() + ".if");
4526 BB->setName(Twine("pred.") + I->getOpcodeName() + ".continue");
4528 // If the instruction is non-void, create a Phi node at the reconvergence point.
4529 if (!I->getType()->isVoidTy()) {
4530 Value *IncomingTrue = nullptr;
4531 Value *IncomingFalse = nullptr;
4533 if (I->hasOneUse() && isa<InsertElementInst>(*I->user_begin())) {
4534 // If the predicated instruction is feeding an insert-element, move it
4535 // into the Then block; Phi node will be created for the vector.
4536 InsertElementInst *IEI = cast<InsertElementInst>(*I->user_begin());
4537 IEI->moveBefore(T); // Move the insert-element into the Then block.
4538 IncomingTrue = IEI; // the new vector with the inserted element.
4539 IncomingFalse = IEI->getOperand(0); // the unmodified vector
4540 } else {
4541 // Phi node will be created for the scalar predicated instruction.
4542 IncomingTrue = &*I;
4543 IncomingFalse = UndefValue::get(I->getType());
4544 }
4546 BasicBlock *PostDom = I->getParent()->getSingleSuccessor();
4547 assert(PostDom && "Then block has multiple successors");
4548 PHINode *Phi =
4549 PHINode::Create(IncomingTrue->getType(), 2, "", &PostDom->front());
4550 IncomingTrue->replaceAllUsesWith(Phi);
4551 Phi->addIncoming(IncomingFalse, Head);
4552 Phi->addIncoming(IncomingTrue, I->getParent());
4553 }
4554 }
4556 DEBUG(DT->verifyDomTree());
4557 }
4559 InnerLoopVectorizer::VectorParts
4560 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
4561 assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
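// For illustration (values hypothetical): if Src ends in
// 'br i1 %c, label %Dst, label %Other', the mask for edge (Src, Dst) is
// 'and(block-in-mask(Src), %c)', and 'and(block-in-mask(Src), not(%c))'
// for the edge to %Other.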
4563 // Look for cached value.
4564 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
4565 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
4566 if (ECEntryIt != EdgeMaskCache.end())
4567 return ECEntryIt->second;
4569 VectorParts SrcMask = createBlockInMask(Src);
4571 // The terminator has to be a branch inst!
4572 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
4573 assert(BI && "Unexpected terminator found");
4575 if (BI->isConditional()) {
4576 VectorParts EdgeMask = getVectorValue(BI->getCondition());
4578 if (BI->getSuccessor(0) != Dst)
4579 for (unsigned part = 0; part < UF; ++part)
4580 EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);
4582 for (unsigned part = 0; part < UF; ++part)
4583 EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);
4585 EdgeMaskCache[Edge] = EdgeMask;
4586 return EdgeMask;
4587 }
4589 EdgeMaskCache[Edge] = SrcMask;
4590 return SrcMask;
4591 }
4593 InnerLoopVectorizer::VectorParts
4594 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
4595 assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
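// For illustration (blocks hypothetical): for a diamond in which BB has
// predecessors T and F, the block-in mask is
// 'or(edge-mask(T, BB), edge-mask(F, BB))'; the loop header gets all-ones.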
4597 // Look for cached value.
4598 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
4599 if (BCEntryIt != BlockMaskCache.end())
4600 return BCEntryIt->second;
4602 // Loop incoming mask is all-one.
4603 if (OrigLoop->getHeader() == BB) {
4604 Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
4605 const VectorParts &BlockMask = getVectorValue(C);
4606 BlockMaskCache[BB] = BlockMask;
4607 return BlockMask;
4608 }
4610 // This is the block mask: the OR of all incoming edge masks, seeded with zero.
4611 Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
4612 VectorParts BlockMask = getVectorValue(Zero);
4615 for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
4616 VectorParts EM = createEdgeMask(*it, BB);
4617 for (unsigned part = 0; part < UF; ++part)
4618 BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
4619 }
4621 BlockMaskCache[BB] = BlockMask;
4622 return BlockMask;
4623 }
4625 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4626 unsigned VF) {
4627 PHINode *P = cast<PHINode>(PN);
4628 // In order to support recurrences we need to be able to vectorize Phi nodes.
4629 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4630 // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4631 // this value when we vectorize all of the instructions that use the PHI.
4632 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4633 VectorParts Entry(UF);
4634 for (unsigned part = 0; part < UF; ++part) {
4635 // This is phase one of vectorizing PHIs.
4636 Type *VecTy =
4637 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4638 Entry[part] = PHINode::Create(
4639 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4640 }
4641 VectorLoopValueMap.initVector(P, Entry);
4642 return;
4643 }
4645 setDebugLocFromInst(Builder, P);
4646 // Check for PHI nodes that are lowered to vector selects.
4647 if (P->getParent() != OrigLoop->getHeader()) {
4648 // We know that all PHIs in non-header blocks are converted into
4649 // selects, so we don't have to worry about the insertion order and we
4650 // can just use the builder.
4651 // At this point we generate the predication tree. There may be
4652 // duplications since this is a simple recursive scan, but future
4653 // optimizations will clean it up.
4655 unsigned NumIncoming = P->getNumIncomingValues();
4657 // Generate a sequence of selects of the form:
4658 // SELECT(Mask3, In3,
4659 // SELECT(Mask2, In2,
4660 // SELECT(Mask1, In1, In0)))
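// For illustration (names hypothetical): with three incoming values this
// emits, per unroll part, roughly:
//   %s0 = select <VF x i1> %mask0, %in0, %in0   ; identity select
//   %s1 = select <VF x i1> %mask1, %in1, %s0
//   %predphi = select <VF x i1> %mask2, %in2, %s1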
4661 VectorParts Entry(UF);
4662 for (unsigned In = 0; In < NumIncoming; In++) {
4663 VectorParts Cond =
4664 createEdgeMask(P->getIncomingBlock(In), P->getParent());
4665 const VectorParts &In0 = getVectorValue(P->getIncomingValue(In));
4667 for (unsigned part = 0; part < UF; ++part) {
4668 // We might have single edge PHIs (blocks) - use an identity
4669 // 'select' for the first PHI operand.
4670 if (In == 0)
4671 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]);
4672 else
4673 // Select between the current value and the previous incoming edge
4674 // based on the incoming mask.
4675 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part],
4676 "predphi");
4677 }
4678 }
4679 VectorLoopValueMap.initVector(P, Entry);
4680 return;
4681 }
4683 // This PHINode must be an induction variable.
4684 // Make sure that we know about it.
4685 assert(Legal->getInductionVars()->count(P) && "Not an induction variable");
4687 InductionDescriptor II = Legal->getInductionVars()->lookup(P);
4688 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4690 // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4691 // which can be found from the original scalar operations.
4692 switch (II.getKind()) {
4693 case InductionDescriptor::IK_NoInduction:
4694 llvm_unreachable("Unknown induction");
4695 case InductionDescriptor::IK_IntInduction:
4696 case InductionDescriptor::IK_FpInduction:
4697 return widenIntOrFpInduction(P);
4698 case InductionDescriptor::IK_PtrInduction: {
4699 // Handle the pointer induction variable case.
4700 assert(P->getType()->isPointerTy() && "Unexpected type.");
4701 // This is the normalized GEP that starts counting at zero.
4702 Value *PtrInd = Induction;
4703 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
4704 // Determine the number of scalars we need to generate for each unroll
4705 // iteration. If the instruction is uniform, we only need to generate the
4706 // first lane. Otherwise, we generate all VF values.
4707 unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
4708 // These are the scalar results. Notice that we don't generate vector GEPs
4709 // because scalar GEPs result in better code.
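// For illustration (names hypothetical): for a pointer induction over
// 'i32* %A' with VF = 4, lane 1 of part 0 becomes
//   %next.gep = getelementptr i32, i32* %A, i64 <start + 1>
// instead of a single wide <4 x i32*> GEP.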
4710 ScalarParts Entry(UF);
4711 for (unsigned Part = 0; Part < UF; ++Part) {
4712 Entry[Part].resize(VF);
4713 for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4714 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
4715 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4716 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
4717 SclrGep->setName("next.gep");
4718 Entry[Part][Lane] = SclrGep;
4719 }
4720 }
4721 VectorLoopValueMap.initScalar(P, Entry);
4722 return;
4723 }
4724 }
4725 }
4727 /// A helper function for checking whether an integer division-related
4728 /// instruction may divide by zero (in which case it must be predicated if
4729 /// executed conditionally in the scalar code).
4730 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4731 /// Non-zero divisors that are not compile-time constants will not be
4732 /// converted into multiplication, so we will still end up scalarizing
4733 /// the division, but can do so without predication.
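/// For example, 'udiv i32 %x, 7' has a known non-zero divisor and needs no
/// predication, whereas 'udiv i32 %x, %y' (or a literal zero divisor) may
/// trap and must be predicated when executed conditionally.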
4734 static bool mayDivideByZero(Instruction &I) {
4735 assert((I.getOpcode() == Instruction::UDiv ||
4736 I.getOpcode() == Instruction::SDiv ||
4737 I.getOpcode() == Instruction::URem ||
4738 I.getOpcode() == Instruction::SRem) &&
4739 "Unexpected instruction");
4740 Value *Divisor = I.getOperand(1);
4741 auto *CInt = dyn_cast<ConstantInt>(Divisor);
4742 return !CInt || CInt->isZero();
4743 }
4745 void InnerLoopVectorizer::vectorizeInstruction(Instruction &I) {
4746 // Scalarize instructions that should remain scalar after vectorization.
4747 if (VF > 1 &&
4748 !(isa<BranchInst>(&I) || isa<PHINode>(&I) || isa<DbgInfoIntrinsic>(&I)) &&
4749 shouldScalarizeInstruction(&I)) {
4750 scalarizeInstruction(&I, Legal->isScalarWithPredication(&I));
4751 return;
4752 }
4754 switch (I.getOpcode()) {
4755 case Instruction::Br:
4756 // Nothing to do for PHIs and BR, since we already took care of the
4757 // loop control flow instructions.
4758 break;
4759 case Instruction::PHI: {
4760 // Vectorize PHINodes.
4761 widenPHIInstruction(&I, UF, VF);
4762 break;
4763 }
4764 case Instruction::GetElementPtr: {
4765 // Construct a vector GEP by widening the operands of the scalar GEP as
4766 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4767 // results in a vector of pointers when at least one operand of the GEP
4768 // is vector-typed. Thus, to keep the representation compact, we only use
4769 // vector-typed operands for loop-varying values.
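// For illustration (names hypothetical): for '&A[i].x' with VF = 4 and a
// loop-varying index, this produces something like
//   %g = getelementptr inbounds %S, %S* %A, <4 x i64> %vec.ind, i32 0
// whose result is a vector of pointers.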
4770 auto *GEP = cast<GetElementPtrInst>(&I);
4771 VectorParts Entry(UF);
4773 if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) {
4774 // If we are vectorizing, but the GEP has only loop-invariant operands,
4775 // the GEP we build (by only using vector-typed operands for
4776 // loop-varying values) would be a scalar pointer. Thus, to ensure we
4777 // produce a vector of pointers, we need to either arbitrarily pick an
4778 // operand to broadcast, or broadcast a clone of the original GEP.
4779 // Here, we broadcast a clone of the original.
4781 // TODO: If at some point we decide to scalarize instructions having
4782 // loop-invariant operands, this special case will no longer be
4783 // required. We would add the scalarization decision to
4784 // collectLoopScalars() and teach getVectorValue() to broadcast
4785 // the lane-zero scalar value.
4786 auto *Clone = Builder.Insert(GEP->clone());
4787 for (unsigned Part = 0; Part < UF; ++Part)
4788 Entry[Part] = Builder.CreateVectorSplat(VF, Clone);
4789 } else {
4790 // If the GEP has at least one loop-varying operand, we are sure to
4791 // produce a vector of pointers. But if we are only unrolling, we want
4792 // to produce a scalar GEP for each unroll part. Thus, the GEP we
4793 // produce with the code below will be scalar (if VF == 1) or vector
4794 // (otherwise). Note that for the unroll-only case, we still maintain
4795 // values in the vector mapping with initVector, as we do for other
4796 // instructions.
4797 for (unsigned Part = 0; Part < UF; ++Part) {
4799 // The pointer operand of the new GEP. If it's loop-invariant, we
4800 // won't broadcast it.
4801 auto *Ptr = OrigLoop->isLoopInvariant(GEP->getPointerOperand())
4802 ? GEP->getPointerOperand()
4803 : getVectorValue(GEP->getPointerOperand())[Part];
4805 // Collect all the indices for the new GEP. If any index is
4806 // loop-invariant, we won't broadcast it.
4807 SmallVector<Value *, 4> Indices;
4808 for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) {
4809 if (OrigLoop->isLoopInvariant(U.get()))
4810 Indices.push_back(U.get());
4811 else
4812 Indices.push_back(getVectorValue(U.get())[Part]);
4813 }
4815 // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4816 // but it should be a vector, otherwise.
4817 auto *NewGEP = GEP->isInBounds()
4818 ? Builder.CreateInBoundsGEP(Ptr, Indices)
4819 : Builder.CreateGEP(Ptr, Indices);
4820 assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
4821 "NewGEP is not a pointer vector");
4822 Entry[Part] = NewGEP;
4823 }
4824 }
4826 VectorLoopValueMap.initVector(&I, Entry);
4827 addMetadata(Entry, GEP);
4828 break;
4829 }
4830 case Instruction::UDiv:
4831 case Instruction::SDiv:
4832 case Instruction::SRem:
4833 case Instruction::URem:
4834 // Scalarize with predication if this instruction may divide by zero and
4835 // block execution is conditional, otherwise fallthrough.
4836 if (Legal->isScalarWithPredication(&I)) {
4837 scalarizeInstruction(&I, true);
4838 break;
4839 }
4840 case Instruction::Add:
4841 case Instruction::FAdd:
4842 case Instruction::Sub:
4843 case Instruction::FSub:
4844 case Instruction::Mul:
4845 case Instruction::FMul:
4846 case Instruction::FDiv:
4847 case Instruction::FRem:
4848 case Instruction::Shl:
4849 case Instruction::LShr:
4850 case Instruction::AShr:
4851 case Instruction::And:
4852 case Instruction::Or:
4853 case Instruction::Xor: {
4854 // Just widen binops.
4855 auto *BinOp = cast<BinaryOperator>(&I);
4856 setDebugLocFromInst(Builder, BinOp);
4857 const VectorParts &A = getVectorValue(BinOp->getOperand(0));
4858 const VectorParts &B = getVectorValue(BinOp->getOperand(1));
4860 // Use this vector value for all users of the original instruction.
4861 VectorParts Entry(UF);
4862 for (unsigned Part = 0; Part < UF; ++Part) {
4863 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]);
4865 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
4866 VecOp->copyIRFlags(BinOp);
4868 Entry[Part] = V;
4869 }
4871 VectorLoopValueMap.initVector(&I, Entry);
4872 addMetadata(Entry, BinOp);
4873 break;
4874 }
4875 case Instruction::Select: {
4877 // If the selector is loop invariant we can create a select
4878 // instruction with a scalar condition. Otherwise, use vector-select.
4879 auto *SE = PSE.getSE();
4880 bool InvariantCond =
4881 SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
4882 setDebugLocFromInst(Builder, &I);
4884 // The condition can be loop invariant but still defined inside the
4885 // loop. This means that we can't just use the original 'cond' value.
4886 // We have to take the 'vectorized' value and pick the first lane.
4887 // Instcombine will make this a no-op.
4888 const VectorParts &Cond = getVectorValue(I.getOperand(0));
4889 const VectorParts &Op0 = getVectorValue(I.getOperand(1));
4890 const VectorParts &Op1 = getVectorValue(I.getOperand(2));
4892 auto *ScalarCond = getScalarValue(I.getOperand(0), 0, 0);
4894 VectorParts Entry(UF);
4895 for (unsigned Part = 0; Part < UF; ++Part) {
4896 Entry[Part] = Builder.CreateSelect(
4897 InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]);
4898 }
4900 VectorLoopValueMap.initVector(&I, Entry);
4901 addMetadata(Entry, &I);
4902 break;
4903 }
4905 case Instruction::ICmp:
4906 case Instruction::FCmp: {
4907 // Widen compares. Generate vector compares.
4908 bool FCmp = (I.getOpcode() == Instruction::FCmp);
4909 auto *Cmp = dyn_cast<CmpInst>(&I);
4910 setDebugLocFromInst(Builder, Cmp);
4911 const VectorParts &A = getVectorValue(Cmp->getOperand(0));
4912 const VectorParts &B = getVectorValue(Cmp->getOperand(1));
4913 VectorParts Entry(UF);
4914 for (unsigned Part = 0; Part < UF; ++Part) {
4915 Value *C = nullptr;
4916 if (FCmp) {
4917 C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
4918 cast<FCmpInst>(C)->copyFastMathFlags(Cmp);
4919 } else {
4920 C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
4921 }
4922 Entry[Part] = C;
4923 }
4925 VectorLoopValueMap.initVector(&I, Entry);
4926 addMetadata(Entry, &I);
4927 break;
4928 }
4930 case Instruction::Store:
4931 case Instruction::Load:
4932 vectorizeMemoryInstruction(&I);
4933 break;
4934 case Instruction::ZExt:
4935 case Instruction::SExt:
4936 case Instruction::FPToUI:
4937 case Instruction::FPToSI:
4938 case Instruction::FPExt:
4939 case Instruction::PtrToInt:
4940 case Instruction::IntToPtr:
4941 case Instruction::SIToFP:
4942 case Instruction::UIToFP:
4943 case Instruction::Trunc:
4944 case Instruction::FPTrunc:
4945 case Instruction::BitCast: {
4946 auto *CI = dyn_cast<CastInst>(&I);
4947 setDebugLocFromInst(Builder, CI);
4949 // Optimize the special case where the source is a constant integer
4950 // induction variable. Notice that we can only optimize the 'trunc' case
4951 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
4952 // (c) other casts depend on pointer size.
4953 if (Cost->isOptimizableIVTruncate(CI, VF)) {
4954 widenIntOrFpInduction(cast<PHINode>(CI->getOperand(0)),
4955 cast<TruncInst>(CI));
4956 break;
4957 }
4959 /// Vectorize casts.
4960 Type *DestTy =
4961 (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4963 const VectorParts &A = getVectorValue(CI->getOperand(0));
4964 VectorParts Entry(UF);
4965 for (unsigned Part = 0; Part < UF; ++Part)
4966 Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
4967 VectorLoopValueMap.initVector(&I, Entry);
4968 addMetadata(Entry, &I);
4969 break;
4970 }
4972 case Instruction::Call: {
4973 // Ignore dbg intrinsics.
4974 if (isa<DbgInfoIntrinsic>(I))
4975 break;
4976 setDebugLocFromInst(Builder, &I);
4978 Module *M = I.getParent()->getParent()->getParent();
4979 auto *CI = cast<CallInst>(&I);
4981 StringRef FnName = CI->getCalledFunction()->getName();
4982 Function *F = CI->getCalledFunction();
4983 Type *RetTy = ToVectorTy(CI->getType(), VF);
4984 SmallVector<Type *, 4> Tys;
4985 for (Value *ArgOperand : CI->arg_operands())
4986 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4988 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4989 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
4990 ID == Intrinsic::lifetime_start)) {
4991 scalarizeInstruction(&I);
4992 break;
4993 }
4994 // The flag shows whether we use an intrinsic or a regular call for the
4995 // vectorized version of the instruction.
4996 // Is it beneficial to perform the intrinsic call rather than the lib call?
4997 bool NeedToScalarize;
4998 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
4999 bool UseVectorIntrinsic =
5000 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
5001 if (!UseVectorIntrinsic && NeedToScalarize) {
5002 scalarizeInstruction(&I);
5003 break;
5004 }
5006 VectorParts Entry(UF);
5007 for (unsigned Part = 0; Part < UF; ++Part) {
5008 SmallVector<Value *, 4> Args;
5009 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
5010 Value *Arg = CI->getArgOperand(i);
5011 // Some intrinsics have a scalar argument - don't replace it with a
5012 // vector.
5013 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) {
5014 const VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i));
5015 Arg = VectorArg[Part];
5016 }
5017 Args.push_back(Arg);
5018 }
5020 Function *VectorF;
5021 if (UseVectorIntrinsic) {
5022 // Use vector version of the intrinsic.
5023 Type *TysForDecl[] = {CI->getType()};
5024 if (VF > 1)
5025 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
5026 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
5027 } else {
5028 // Use vector version of the library call.
5029 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
5030 assert(!VFnName.empty() && "Vector function name is empty.");
5031 VectorF = M->getFunction(VFnName);
5032 if (!VectorF) {
5033 // Generate a declaration.
5034 FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
5035 VectorF =
5036 Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
5037 VectorF->copyAttributesFrom(F);
5038 }
5039 }
5040 assert(VectorF && "Can't create vector function.");
5042 SmallVector<OperandBundleDef, 1> OpBundles;
5043 CI->getOperandBundlesAsDefs(OpBundles);
5044 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
5046 if (isa<FPMathOperator>(V))
5047 V->copyFastMathFlags(CI);
5049 Entry[Part] = V;
5050 }
5052 VectorLoopValueMap.initVector(&I, Entry);
5053 addMetadata(Entry, &I);
5054 break;
5055 }
5057 default:
5058 // All other instructions are unsupported. Scalarize them.
5059 scalarizeInstruction(&I);
5060 break;
5061 }
5062 }
5064 void InnerLoopVectorizer::updateAnalysis() {
5065 // Forget the original basic block.
5066 PSE.getSE()->forgetLoop(OrigLoop);
5068 // Update the dominator tree information.
5069 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
5070 "Entry does not dominate exit.");
5072 DT->addNewBlock(LI->getLoopFor(LoopVectorBody)->getHeader(),
5073 LoopVectorPreHeader);
5074 DT->addNewBlock(LoopMiddleBlock,
5075 LI->getLoopFor(LoopVectorBody)->getLoopLatch());
5076 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
5077 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
5078 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
5080 DEBUG(DT->verifyDomTree());
5081 }
5083 /// \brief Check whether it is safe to if-convert this phi node.
5085 /// Phi nodes with constant expressions that can trap are not safe to if
5086 /// convert.
5087 static bool canIfConvertPHINodes(BasicBlock *BB) {
5088 for (Instruction &I : *BB) {
5089 auto *Phi = dyn_cast<PHINode>(&I);
5090 if (!Phi)
5091 return true;
5092 for (Value *V : Phi->incoming_values())
5093 if (auto *C = dyn_cast<Constant>(V))
5094 if (C->canTrap())
5095 return false;
5096 }
5097 return true;
5098 }
5100 bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
5101 if (!EnableIfConversion) {
5102 ORE->emit(createMissedAnalysis("IfConversionDisabled")
5103 << "if-conversion is disabled");
5104 return false;
5105 }
5107 assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
5109 // A list of pointers that we can safely read and write to.
5110 SmallPtrSet<Value *, 8> SafePointers;
5112 // Collect safe addresses.
5113 for (BasicBlock *BB : TheLoop->blocks()) {
5114 if (blockNeedsPredication(BB))
5115 continue;
5117 for (Instruction &I : *BB)
5118 if (auto *Ptr = getPointerOperand(&I))
5119 SafePointers.insert(Ptr);
5120 }
5122 // Collect the blocks that need predication.
5123 BasicBlock *Header = TheLoop->getHeader();
5124 for (BasicBlock *BB : TheLoop->blocks()) {
5125 // We don't support switch statements inside loops.
5126 if (!isa<BranchInst>(BB->getTerminator())) {
5127 ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator())
5128 << "loop contains a switch statement");
5129 return false;
5130 }
5132 // We must be able to predicate all blocks that need to be predicated.
5133 if (blockNeedsPredication(BB)) {
5134 if (!blockCanBePredicated(BB, SafePointers)) {
5135 ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
5136 << "control flow cannot be substituted for a select");
5137 return false;
5138 }
5139 } else if (BB != Header && !canIfConvertPHINodes(BB)) {
5140 ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
5141 << "control flow cannot be substituted for a select");
5142 return false;
5143 }
5144 }
5146 // We can if-convert this loop.
5147 return true;
5148 }
5150 bool LoopVectorizationLegality::canVectorize() {
5151 // We must have a loop in canonical form. Loops with indirectbr in them cannot
5152 // be canonicalized.
5153 if (!TheLoop->getLoopPreheader()) {
5154 ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5155 << "loop control flow is not understood by vectorizer");
5156 return false;
5157 }
5159 // FIXME: The code is currently dead, since any loop that gets sent to
5160 // LoopVectorizationLegality is already an innermost loop.
5161 //
5162 // We can only vectorize innermost loops.
5163 if (!TheLoop->empty()) {
5164 ORE->emit(createMissedAnalysis("NotInnermostLoop")
5165 << "loop is not the innermost loop");
5166 return false;
5167 }
5169 // We must have a single backedge.
5170 if (TheLoop->getNumBackEdges() != 1) {
5171 ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5172 << "loop control flow is not understood by vectorizer");
5173 return false;
5174 }
5176 // We must have a single exiting block.
5177 if (!TheLoop->getExitingBlock()) {
5178 ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5179 << "loop control flow is not understood by vectorizer");
5180 return false;
5181 }
5183 // We only handle bottom-tested loops, i.e. loops in which the condition is
5184 // checked at the end of each iteration. With that we can assume that all
5185 // instructions in the loop are executed the same number of times.
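// For example, a rotated loop of the form 'do { ... } while (++i < n);' is
// bottom-tested and acceptable, while a while-style loop whose exit test
// precedes the body is rejected below.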
5186 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5187 ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5188 << "loop control flow is not understood by vectorizer");
5189 return false;
5190 }
5192 // We need to have a loop header.
5193 DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
5194 << '\n');
5196 // Check if we can if-convert non-single-bb loops.
5197 unsigned NumBlocks = TheLoop->getNumBlocks();
5198 if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
5199 DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
5200 return false;
5201 }
5203 // ScalarEvolution needs to be able to find the exit count.
5204 const SCEV *ExitCount = PSE.getBackedgeTakenCount();
5205 if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
5206 ORE->emit(createMissedAnalysis("CantComputeNumberOfIterations")
5207 << "could not determine number of loop iterations");
5208 DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
5209 return false;
5210 }
5212 // Check if we can vectorize the instructions and CFG in this loop.
5213 if (!canVectorizeInstrs()) {
5214 DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
5215 return false;
5216 }
5218 // Go over each instruction and look at memory deps.
5219 if (!canVectorizeMemory()) {
5220 DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
5221 return false;
5222 }
5224 DEBUG(dbgs() << "LV: We can vectorize this loop"
5225 << (LAI->getRuntimePointerChecking()->Need
5226 ? " (with a runtime bound check)"
5227 : "")
5228 << "!\n");
5230 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
5232 // If an override option has been passed in for interleaved accesses, use it.
5233 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
5234 UseInterleaved = EnableInterleavedMemAccesses;
5236 // Analyze interleaved memory accesses.
5237 if (UseInterleaved)
5238 InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());
5240 unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
5241 if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
5242 SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;
5244 if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
5245 ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks")
5246 << "Too many SCEV assumptions need to be made and checked "
5247 "at runtime");
5248 DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
5249 return false;
5250 }
5252 // Okay! We can vectorize. At this point we don't have any other mem analysis
5253 // which may limit our maximum vectorization factor, so just return true with
5254 // no restrictions.
5255 return true;
5256 }
5258 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
5259 if (Ty->isPointerTy())
5260 return DL.getIntPtrType(Ty);
5262 // It is possible that chars or shorts overflow when we ask for the loop's
5263 // trip count, so work around this by widening the type.
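// For example, an i8 counter cannot represent a trip count of 200 without
// wrapping, so such narrow types are widened to i32 below.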
5264 if (Ty->getScalarSizeInBits() < 32)
5265 return Type::getInt32Ty(Ty->getContext());
5267 return Ty;
5268 }
5270 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
5271 Ty0 = convertPointerToIntegerType(DL, Ty0);
5272 Ty1 = convertPointerToIntegerType(DL, Ty1);
5273 if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
5274 return Ty0;
5275 return Ty1;
5276 }
5278 /// \brief Check that the instruction has outside loop users and is not an
5279 /// identified reduction variable.
5280 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
5281 SmallPtrSetImpl<Value *> &AllowedExit) {
5282 // Reduction and Induction instructions are allowed to have exit users. All
5283 // other instructions must not have external users.
5284 if (!AllowedExit.count(Inst))
5285 // Check that all of the users of the instruction are inside the loop.
5286 for (User *U : Inst->users()) {
5287 Instruction *UI = cast<Instruction>(U);
5288 // This user may be a reduction exit value.
5289 if (!TheLoop->contains(UI)) {
5290 DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
5291 return true;
5292 }
5293 }
5294 return false;
5295 }
5297 void LoopVectorizationLegality::addInductionPhi(
5298 PHINode *Phi, const InductionDescriptor &ID,
5299 SmallPtrSetImpl<Value *> &AllowedExit) {
5300 Inductions[Phi] = ID;
5301 Type *PhiTy = Phi->getType();
5302 const DataLayout &DL = Phi->getModule()->getDataLayout();
5304 // Get the widest type.
5305 if (!PhiTy->isFloatingPointTy()) {
5306 if (!WidestIndTy)
5307 WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
5308 else
5309 WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
5310 }
5312 // Int inductions are special because we only allow one IV.
5313 if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
5314 ID.getConstIntStepValue() &&
5315 ID.getConstIntStepValue()->isOne() &&
5316 isa<Constant>(ID.getStartValue()) &&
5317 cast<Constant>(ID.getStartValue())->isNullValue()) {
5319 // Use the phi node with the widest type as induction. Use the last
5320 // one if there are multiple (no good reason for doing this other
5321 // than it is expedient). We've checked that it begins at zero and
5322 // steps by one, so this is a canonical induction variable.
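// For example, the phi of 'for (i32 i = 0; i != n; ++i)' starts at zero and
// steps by one, so it qualifies as the primary (canonical) induction.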
5323 if (!PrimaryInduction || PhiTy == WidestIndTy)
5324 PrimaryInduction = Phi;
5325 }
5327 // Both the PHI node itself, and the "post-increment" value feeding
5328 // back into the PHI node may have external users.
5329 AllowedExit.insert(Phi);
5330 AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
5332 DEBUG(dbgs() << "LV: Found an induction variable.\n");
5333 }
5336 bool LoopVectorizationLegality::canVectorizeInstrs() {
5337 BasicBlock *Header = TheLoop->getHeader();
5339 // Look for the attribute signaling the absence of NaNs.
5340 Function &F = *Header->getParent();
5341 bool HasFunNoNaNAttr =
5342 F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
5344 // For each block in the loop.
5345 for (BasicBlock *BB : TheLoop->blocks()) {
5346 // Scan the instructions in the block and look for hazards.
5347 for (Instruction &I : *BB) {
5348 if (auto *Phi = dyn_cast<PHINode>(&I)) {
5349 Type *PhiTy = Phi->getType();
5350 // Check that this PHI type is allowed.
5351 if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
5352 !PhiTy->isPointerTy()) {
5353 ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5354 << "loop control flow is not understood by vectorizer");
5355 DEBUG(dbgs() << "LV: Found a non-int non-pointer PHI.\n");
5356 return false;
5357 }
5359 // If this PHINode is not in the header block, then we know that we
5360 // can convert it to select during if-conversion. No need to check if
5361 // the PHIs in this block are induction or reduction variables.
5362 if (BB != Header) {
5363 // Check that this instruction has no outside users or is an
5364 // identified reduction value with an outside user.
5365 if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
5366 continue;
5367 ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi)
5368 << "value could not be identified as "
5369 "an induction or reduction variable");
5370 return false;
5371 }
5373 // We only allow if-converted PHIs with exactly two incoming values.
5374 if (Phi->getNumIncomingValues() != 2) {
5375 ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5376 << "control flow not understood by vectorizer");
5377 DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
5378 return false;
5379 }
5381 RecurrenceDescriptor RedDes;
5382 if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
5383 if (RedDes.hasUnsafeAlgebra())
5384 Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
5385 AllowedExit.insert(RedDes.getLoopExitInstr());
5386 Reductions[Phi] = RedDes;
5387 continue;
5388 }
5390 InductionDescriptor ID;
5391 if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
5392 addInductionPhi(Phi, ID, AllowedExit);
5393 if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
5394 Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
5395 continue;
5396 }
5398 if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
5399 FirstOrderRecurrences.insert(Phi);
5400 continue;
5401 }
5403 // As a last resort, coerce the PHI to an AddRec expression
5404 // and re-try classifying it as an induction PHI.
5405 if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
5406 addInductionPhi(Phi, ID, AllowedExit);
5407 continue;
5408 }
5410 ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi)
5411 << "value that could not be identified as "
5412 "reduction is used outside the loop");
5413 DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
5414 return false;
5415 } // end of PHI handling
5417 // We handle calls that:
5418 // * Are debug info intrinsics.
5419 // * Have a mapping to an IR intrinsic.
5420 // * Have a vector version available.
5421 auto *CI = dyn_cast<CallInst>(&I);
5422 if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
5423 !isa<DbgInfoIntrinsic>(CI) &&
5424 !(CI->getCalledFunction() && TLI &&
5425 TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
5426 ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
5427 << "call instruction cannot be vectorized");
5428 DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
5429 return false;
5430 }
5432 // Intrinsics such as powi, cttz, and ctlz are legal to vectorize if the
5433 // second argument is the same for every iteration (i.e. loop invariant).
5434 if (CI && hasVectorInstrinsicScalarOpd(
5435 getVectorIntrinsicIDForCall(CI, TLI), 1)) {
5436 auto *SE = PSE.getSE();
5437 if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
5438 ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI)
5439 << "intrinsic instruction cannot be vectorized");
5440 DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
5441 return false;
5442 }
5443 }
5445 // Check that the instruction return type is vectorizable.
5446 // Also, we can't vectorize extractelement instructions.
5447 if ((!VectorType::isValidElementType(I.getType()) &&
5448 !I.getType()->isVoidTy()) ||
5449 isa<ExtractElementInst>(I)) {
5450 ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I)
5451 << "instruction return type cannot be vectorized");
5452 DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
5453 return false;
5454 }
5456 // Check that the stored type is vectorizable.
5457 if (auto *ST = dyn_cast<StoreInst>(&I)) {
5458 Type *T = ST->getValueOperand()->getType();
5459 if (!VectorType::isValidElementType(T)) {
5460 ORE->emit(createMissedAnalysis("CantVectorizeStore", ST)
5461 << "store instruction cannot be vectorized");
5462 return false;
5463 }
5465 // FP instructions can allow unsafe algebra, thus vectorizable by
5466 // non-IEEE-754 compliant SIMD units.
5467 // This applies to floating-point math operations and calls, not memory
5468 // operations, shuffles, or casts, as they don't change precision or
5469 // semantics.
5470 } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
5471 !I.hasUnsafeAlgebra()) {
5472 DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
5473 Hints->setPotentiallyUnsafe();
5474 }
5476 // Reduction instructions are allowed to have exit users.
5477 // All other instructions must not have external users.
5478 if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
5479 ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I)
5480 << "value cannot be used outside the loop");
5481 return false;
5482 }
5483 }
5484 }
5487 if (!PrimaryInduction) {
5488 DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
5489 if (Inductions.empty()) {
5490 ORE->emit(createMissedAnalysis("NoInductionVariable")
5491 << "loop induction variable could not be identified");
5492 return false;
5493 }
5494 }
5496 // Now we know the widest induction type, check if our found induction
5497 // is the same size. If it's not, unset it here and InnerLoopVectorizer
5498 // will create another.
5499 if (PrimaryInduction && WidestIndTy != PrimaryInduction->getType())
5500 PrimaryInduction = nullptr;
5502 return true;
5503 }
5505 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
5507 // We should not collect Scalars more than once per VF. Right now, this
5508 // function is called from collectUniformsAndScalars(), which already does
5509 // this check. Collecting Scalars for VF=1 does not make any sense.
5510 assert(VF >= 2 && !Scalars.count(VF) &&
5511 "This function should not be visited twice for the same VF");
5513 SmallSetVector<Instruction *, 8> Worklist;
5515 // These sets are used to seed the analysis with pointers used by memory
5516 // accesses that will remain scalar.
5517 SmallSetVector<Instruction *, 8> ScalarPtrs;
5518 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5520 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5521 // The pointer operands of loads and stores will be scalar as long as the
5522 // memory access is not a gather or scatter operation. The value operand of a
5523 // store will remain scalar if the store is scalarized.
5524 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5525 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5526 assert(WideningDecision != CM_Unknown &&
5527 "Widening decision should be ready at this moment");
5528 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5529 if (Ptr == Store->getValueOperand())
5530 return WideningDecision == CM_Scalarize;
5531 assert(Ptr == getPointerOperand(MemAccess) &&
5532 "Ptr is neither a value nor a pointer operand");
5533 return WideningDecision != CM_GatherScatter;
5534 };
5536 // A helper that returns true if the given value is a bitcast or
5537 // getelementptr instruction contained in the loop.
5538 auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5539 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5540 isa<GetElementPtrInst>(V)) &&
5541 !TheLoop->isLoopInvariant(V);
5542 };
5544 // A helper that evaluates a memory access's use of a pointer. If the use
5545 // will be a scalar use, and the pointer is only used by memory accesses, we
5546 // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
5547 // PossibleNonScalarPtrs.
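// For illustration (names hypothetical): a getelementptr feeding only a
// consecutive load is placed in ScalarPtrs, but if the same getelementptr
// also feeds a call, it is placed in PossibleNonScalarPtrs instead.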
5548 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5550 // We only care about bitcast and getelementptr instructions contained in
5551 // the loop.
5552 if (!isLoopVaryingBitCastOrGEP(Ptr))
5553 return;
5555 // If the pointer has already been identified as scalar (e.g., if it was
5556 // also identified as uniform), there's nothing to do.
5557 auto *I = cast<Instruction>(Ptr);
5558 if (Worklist.count(I))
5559 return;
5561 // If the use of the pointer will be a scalar use, and all users of the
5562 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5563 // place the pointer in PossibleNonScalarPtrs.
5564 if (isScalarUse(MemAccess, Ptr) && all_of(I->users(), [&](User *U) {
5565 return isa<LoadInst>(U) || isa<StoreInst>(U);
5566 }))
5567 ScalarPtrs.insert(I);
5568 else
5569 PossibleNonScalarPtrs.insert(I);
5570 };
5572 // We seed the scalars analysis with three classes of instructions: (1)
5573 // instructions marked uniform-after-vectorization, (2) bitcast and
5574 // getelementptr instructions used by memory accesses requiring a scalar use,
5575 // and (3) pointer induction variables and their update instructions (we
5576 // currently only scalarize these).
5578 // (1) Add to the worklist all instructions that have been identified as
5579 // uniform-after-vectorization.
5580 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5582 // (2) Add to the worklist all bitcast and getelementptr instructions used by
5583 // memory accesses requiring a scalar use. The pointer operands of loads and
5584 // stores will be scalar as long as the memory access is not a gather or
5585 // scatter operation. The value operand of a store will remain scalar if the
5586 // store is scalarized.
5587 for (auto *BB : TheLoop->blocks())
5588 for (auto &I : *BB) {
5589 if (auto *Load = dyn_cast<LoadInst>(&I)) {
5590 evaluatePtrUse(Load, Load->getPointerOperand());
5591 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5592 evaluatePtrUse(Store, Store->getPointerOperand());
5593 evaluatePtrUse(Store, Store->getValueOperand());
5594 }
5595 }
5596 for (auto *I : ScalarPtrs)
5597 if (!PossibleNonScalarPtrs.count(I)) {
5598 DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5599 Worklist.insert(I);
5600 }
5602 // (3) Add to the worklist all pointer induction variables and their update
5603 // instructions.
5605 // TODO: Once we are able to vectorize pointer induction variables we should
5606 // no longer insert them into the worklist here.
5607 auto *Latch = TheLoop->getLoopLatch();
5608 for (auto &Induction : *Legal->getInductionVars()) {
5609 auto *Ind = Induction.first;
5610 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5611 if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
5612 continue;
5613 Worklist.insert(Ind);
5614 Worklist.insert(IndUpdate);
5615 DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5616 DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
5617 }
5619 // Expand the worklist by looking through any bitcasts and getelementptr
5620 // instructions we've already identified as scalar. This is similar to the
5621 // expansion step in collectLoopUniforms(); however, here we're only
5622 // expanding to include additional bitcasts and getelementptr instructions.
5623 unsigned Idx = 0;
5624 while (Idx != Worklist.size()) {
5625 Instruction *Dst = Worklist[Idx++];
5626 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5627 continue;
5628 auto *Src = cast<Instruction>(Dst->getOperand(0));
5629 if (all_of(Src->users(), [&](User *U) -> bool {
5630 auto *J = cast<Instruction>(U);
5631 return !TheLoop->contains(J) || Worklist.count(J) ||
5632 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5633 isScalarUse(J, Src));
5634 })) {
5635 Worklist.insert(Src);
5636 DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5637 }
5638 }
5640 // An induction variable will remain scalar if all users of the induction
5641 // variable and induction variable update remain scalar.
5642 for (auto &Induction : *Legal->getInductionVars()) {
5643 auto *Ind = Induction.first;
5644 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5646 // We already considered pointer induction variables, so there's no reason
5647 // to look at their users again.
5649 // TODO: Once we are able to vectorize pointer induction variables we
5650 // should no longer skip over them here.
5651 if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
5652 continue;
5654 // Determine if all users of the induction variable are scalar after
5655 // vectorization.
5656 auto ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
5657 auto *I = cast<Instruction>(U);
5658 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5659 });
5660 if (!ScalarInd)
5661 continue;
5663 // Determine if all users of the induction variable update instruction are
5664 // scalar after vectorization.
5665 auto ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5666 auto *I = cast<Instruction>(U);
5667 return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5668 });
5669 if (!ScalarIndUpdate)
5670 continue;
5672 // The induction variable and its update instruction will remain scalar.
5673 Worklist.insert(Ind);
5674 Worklist.insert(IndUpdate);
5675 DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5676 DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
5679 Scalars[VF].insert(Worklist.begin(), Worklist.end());
5680 }
5682 bool LoopVectorizationLegality::isScalarWithPredication(Instruction *I) {
5683 if (!blockNeedsPredication(I->getParent()))
5684 return false;
5685 switch (I->getOpcode()) {
5686 default:
5687 break;
5688 case Instruction::Store:
5689 return !isMaskRequired(I);
5690 case Instruction::UDiv:
5691 case Instruction::SDiv:
5692 case Instruction::SRem:
5693 case Instruction::URem:
5694 return mayDivideByZero(*I);
5695 }
5696 return false;
5697 }
5699 bool LoopVectorizationLegality::memoryInstructionCanBeWidened(Instruction *I,
5700 unsigned VF) {
5701 // Get and ensure we have a valid memory instruction.
5702 LoadInst *LI = dyn_cast<LoadInst>(I);
5703 StoreInst *SI = dyn_cast<StoreInst>(I);
5704 assert((LI || SI) && "Invalid memory instruction");
5706 auto *Ptr = getPointerOperand(I);
5708 // In order to be widened, the pointer should be consecutive, first of all.
5709 if (!isConsecutivePtr(Ptr))
5710 return false;
5712 // If the instruction is a store located in a predicated block, it will be
5713 // scalarized.
5714 if (isScalarWithPredication(I))
5715 return false;
5717 // If the instruction's allocated size doesn't equal its type size, it
5718 // requires padding and will be scalarized.
5719 auto &DL = I->getModule()->getDataLayout();
5720 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5721 if (hasIrregularType(ScalarTy, DL, VF))
5722 return false;
5724 return true;
5725 }
5727 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
5729 // We should not collect Uniforms more than once per VF. Right now,
5730 // this function is called from collectUniformsAndScalars(), which
5731 // already does this check. Collecting Uniforms for VF=1 does not make any
5732 // sense.
5734 assert(VF >= 2 && !Uniforms.count(VF) &&
5735 "This function should not be visited twice for the same VF");
5737 // Visit the list of Uniforms. If we find no uniform value, we won't
5738 // analyze again; Uniforms.count(VF) will still return 1.
5739 Uniforms[VF].clear();
5741 // We now know that the loop is vectorizable!
5742 // Collect instructions inside the loop that will remain uniform after
5743 // vectorization.
5745 // Global values, params and instructions outside of current loop are out of
5746 // scope.
5747 auto isOutOfScope = [&](Value *V) -> bool {
5748 Instruction *I = dyn_cast<Instruction>(V);
5749 return (!I || !TheLoop->contains(I));
5750 };
5752 SetVector<Instruction *> Worklist;
5753 BasicBlock *Latch = TheLoop->getLoopLatch();
5755 // Start with the conditional branch. If the branch condition is an
5756 // instruction contained in the loop that is only used by the branch, it is
5757 // uniform.
5758 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5759 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
5760 Worklist.insert(Cmp);
5761 DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
5764 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
5765 // are pointers that are treated like consecutive pointers during
5766 // vectorization. The pointer operands of interleaved accesses are an
5767 // example.
5768 SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
5770 // Holds pointer operands of instructions that are possibly non-uniform.
5771 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
5773 auto isUniformDecision = [&](Instruction *I, unsigned VF) {
5774 InstWidening WideningDecision = getWideningDecision(I, VF);
5775 assert(WideningDecision != CM_Unknown &&
5776 "Widening decision should be ready at this moment");
5778 return (WideningDecision == CM_Widen ||
5779 WideningDecision == CM_Interleave);
5780 };
5781 // Iterate over the instructions in the loop, and collect all
5782 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
5783 // that a consecutive-like pointer operand will be scalarized, we collect it
5784 // in PossibleNonUniformPtrs instead. We use two sets here because a single
5785 // getelementptr instruction can be used by both vectorized and scalarized
5786 // memory instructions. For example, if a loop loads and stores from the same
5787 // location, but the store is conditional, the store will be scalarized, and
5788 // the getelementptr won't remain uniform.
5789 for (auto *BB : TheLoop->blocks())
5790 for (auto &I : *BB) {
5792 // If there's no pointer operand, there's nothing to do.
5793 auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
5794 if (!Ptr)
5795 continue;
5797 // True if all users of Ptr are memory accesses that have Ptr as their
5798 // pointer operand.
5799 auto UsersAreMemAccesses = all_of(Ptr->users(), [&](User *U) -> bool {
5800 return getPointerOperand(U) == Ptr;
5801 });
5803 // Ensure the memory instruction will not be scalarized or used by
5804 // gather/scatter, making its pointer operand non-uniform. If the pointer
5805 // operand is used by any instruction other than a memory access, we
5806 // conservatively assume the pointer operand may be non-uniform.
5807 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
5808 PossibleNonUniformPtrs.insert(Ptr);
5810 // If the memory instruction will be vectorized and its pointer operand
5811 // is consecutive-like, or interleaving - the pointer operand should
5812 // remain uniform.
5813 else
5814 ConsecutiveLikePtrs.insert(Ptr);
5815 }
5817 // Add to the Worklist all consecutive and consecutive-like pointers that
5818 // aren't also identified as possibly non-uniform.
5819 for (auto *V : ConsecutiveLikePtrs)
5820 if (!PossibleNonUniformPtrs.count(V)) {
5821 DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
5822 Worklist.insert(V);
5823 }
5825 // Expand Worklist in topological order: whenever a new instruction
5826 // is added, its users should be either already inside Worklist, or
5827 // out of scope. It ensures a uniform instruction will only be used
5828 // by uniform instructions or out of scope instructions.
5829 unsigned idx = 0;
5830 while (idx != Worklist.size()) {
5831 Instruction *I = Worklist[idx++];
5833 for (auto OV : I->operand_values()) {
5834 if (isOutOfScope(OV))
5835 continue;
5836 auto *OI = cast<Instruction>(OV);
5837 if (all_of(OI->users(), [&](User *U) -> bool {
5838 auto *J = cast<Instruction>(U);
5839 return !TheLoop->contains(J) || Worklist.count(J) ||
5840 (OI == getPointerOperand(J) && isUniformDecision(J, VF));
5841 })) {
5842 Worklist.insert(OI);
5843 DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
5844 }
5845 }
5846 }
5848 // Returns true if Ptr is the pointer operand of a memory access instruction
5849 // I, and I is known to not require scalarization.
5850 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5851 return getPointerOperand(I) == Ptr && isUniformDecision(I, VF);
5852 };
5854 // For an instruction to be added into Worklist above, all its users inside
5855 // the loop should also be in Worklist. However, this condition cannot be
5856 // true for phi nodes that form a cyclic dependence. We must process phi
5857 // nodes separately. An induction variable will remain uniform if all users
5858 // of the induction variable and induction variable update remain uniform.
5859 // The code below handles both pointer and non-pointer induction variables.
5860 for (auto &Induction : *Legal->getInductionVars()) {
5861 auto *Ind = Induction.first;
5862 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5864 // Determine if all users of the induction variable are uniform after
5865 // vectorization.
5866 auto UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
5867 auto *I = cast<Instruction>(U);
5868 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5869 isVectorizedMemAccessUse(I, Ind);
5870 });
5871 if (!UniformInd)
5872 continue;
5874 // Determine if all users of the induction variable update instruction are
5875 // uniform after vectorization.
5876 auto UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5877 auto *I = cast<Instruction>(U);
5878 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5879 isVectorizedMemAccessUse(I, IndUpdate);
5880 });
5881 if (!UniformIndUpdate)
5882 continue;
5884 // The induction variable and its update instruction will remain uniform.
5885 Worklist.insert(Ind);
5886 Worklist.insert(IndUpdate);
5887 DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
5888 DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n");
5891 Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5892 }
5894 bool LoopVectorizationLegality::canVectorizeMemory() {
5895 LAI = &(*GetLAA)(*TheLoop);
5896 InterleaveInfo.setLAI(LAI);
5897 const OptimizationRemarkAnalysis *LAR = LAI->getReport();
5898 if (LAR) {
5899 OptimizationRemarkAnalysis VR(Hints->vectorizeAnalysisPassName(),
5900 "loop not vectorized: ", *LAR);
5901 ORE->emit(VR);
5902 }
5903 if (!LAI->canVectorizeMemory())
5904 return false;
5906 if (LAI->hasStoreToLoopInvariantAddress()) {
5907 ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress")
5908 << "write to a loop invariant address could not be vectorized");
5909 DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
5910 return false;
5911 }
5913 Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
5914 PSE.addPredicate(LAI->getPSE().getUnionPredicate());
5916 return true;
5917 }
5919 bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
5920 Value *In0 = const_cast<Value *>(V);
5921 PHINode *PN = dyn_cast_or_null<PHINode>(In0);
5922 if (!PN)
5923 return false;
5925 return Inductions.count(PN);
5926 }
5928 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
5929 return FirstOrderRecurrences.count(Phi);
5930 }
5932 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
5933 return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
5936 bool LoopVectorizationLegality::blockCanBePredicated(
5937 BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
5938 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
5940 for (Instruction &I : *BB) {
5941 // Check that we don't have a constant expression that can trap as operand.
5942 for (Value *Operand : I.operands()) {
5943 if (auto *C = dyn_cast<Constant>(Operand))
5947 // We might be able to hoist the load.
5948 if (I.mayReadFromMemory()) {
5949 auto *LI = dyn_cast<LoadInst>(&I);
5952 if (!SafePtrs.count(LI->getPointerOperand())) {
5953 if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
5954 isLegalMaskedGather(LI->getType())) {
5955 MaskedOp.insert(LI);
5958 // !llvm.mem.parallel_loop_access implies if-conversion safety.
5959 if (IsAnnotatedParallel)
5965 if (I.mayWriteToMemory()) {
5966 auto *SI = dyn_cast<StoreInst>(&I);
5967 // We only support predication of stores in basic blocks with one
5972 // Build a masked store if it is legal for the target.
5973 if (isLegalMaskedStore(SI->getValueOperand()->getType(),
5974 SI->getPointerOperand()) ||
5975 isLegalMaskedScatter(SI->getValueOperand()->getType())) {
5976 MaskedOp.insert(SI);
5980 bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
5981 bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();
5983 if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
5984 !isSinglePredecessor)
void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {

  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getPointerOperand(&I);
      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
      // conservative. For full groups, wrapping should be ok since if we would
      // wrap around the address space we would do a memory access at nullptr
      // even without the transformation. The wrapping checks are therefore
      // deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      unsigned Align = getMemInstAlignment(&I);
      if (!Align)
        Align = DL.getABITypeAlignment(PtrTy->getElementType());

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];      // (1)
//                                A[i] = b;      // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
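//
// As a concrete illustration (a hypothetical input, not taken from the
// comments above), a loop such as
//
//   for (i = 0; i < n; i += 2) {
//     sum += A[i];     // (1)
//     sum += A[i + 1]; // (2)
//   }
//
// produces two loads with the same stride (2) and size, at a distance of one
// element, so the algorithm below collects (1) and (2) into a single
// interleaved load group with factor 2.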
void InterleavedAccessInfo::analyzeInterleaving(
    const ValueToValueMap &Strides) {
  DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup *Group = nullptr;
    if (isStrided(DesB.Stride)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a; // (1)
      //                    | A[i-1] = b; // (2) |
      //                      A[i-3] = c; // (3)
      //                    | A[i]   = d; // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {

        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup *StoreGroup = getInterleaveGroup(A);
          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory())
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory object of A and B don't belong to the same
      // address space.
      if (getMemInstAddressSpace(A) != getMemInstAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // Ignore A if either A or B is in a predicated block. Although we
      // currently prevent group formation for predicated accesses, we may be
      // able to relax this limitation in the future once we handle more
      // complicated blocks.
      if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                     << "    into the interleave group with" << *B << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  } // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor())
      releaseGroup(Group);

  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we'll change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (InterleaveGroup *Group : LoadGroups) {

    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap. So we check only
    // group member 0 (which is always guaranteed to exist), and group member
    // Factor - 1; if the latter doesn't exist, we rely on peeling (if it is a
    // non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getPointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getPointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                        "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: we need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        releaseGroup(Group);
        continue;
      }
      DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}

Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
  if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
    ORE->emit(createMissedAnalysis("ConditionalStore")
              << "store that is conditionally executed prevents vectorization");
    DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
    return None;
  }

  if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize.
    return computeFeasibleMaxVF(OptForSize);

  if (Legal->getRuntimePointerChecking()->Need) {
    ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
              << "runtime pointer checks needed. Enable vectorization of this "
                 "loop with '#pragma clang loop vectorize(enable)' when "
                 "compiling with -Os/-Oz");
    DEBUG(dbgs()
          << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
    return None;
  }

  // If we optimize the program for size, avoid creating the tail loop.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');

  // If we don't know the precise trip count, don't try to vectorize.
  if (TC < 2) {
    ORE->emit(
        createMissedAnalysis("UnknownLoopCountComplexCFG")
        << "unable to calculate the loop count due to complex control flow");
    DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
    return None;
  }

  unsigned MaxVF = computeFeasibleMaxVF(OptForSize);

  if (TC % MaxVF != 0) {
    // If the trip count that we found modulo the vectorization factor is not
    // zero then we require a tail.
    // FIXME: look for a smaller MaxVF that does divide TC rather than give up.
    // FIXME: return None if loop requiresScalarEpilog(<MaxVF>), or look for a
    //        smaller MaxVF that does not require a scalar epilog.
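    //
    // For example (numbers purely illustrative): a trip count of 12 with
    // MaxVF = 8 leaves 12 % 8 = 4 remainder iterations, which would require a
    // scalar tail loop that must be avoided when optimizing for size.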
    ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize")
              << "cannot optimize for size and vectorize at the "
                 "same time. Enable vectorization of this loop "
                 "with '#pragma clang loop vectorize(enable)' "
                 "when compiling with -Os/-Oz");
    DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
    return None;
  }

  return MaxVF;
}

unsigned LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize) {
  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);
  unsigned MaxSafeDepDist = -1U;

  // Get the maximum safe dependence distance in bits computed by LAA. If the
  // loop contains any interleaved accesses, we divide the dependence distance
  // by the maximum interleave factor of all interleaved groups. Note that
  // although the division ensures correctness, this is a fairly conservative
  // computation because the maximum distance computed by LAA may not involve
  // any of the interleaved accesses.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist =
        Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor();

  WidestRegister =
      ((WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist);
  unsigned MaxVectorSize = WidestRegister / WidestType;
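  // For example (illustrative only): with a 256-bit widest register and a
  // widest element type of 32 bits, MaxVectorSize is 256 / 32 = 8 lanes.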
  DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
               << WidestType << " bits.\n");
  DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister
               << " bits.\n");

  if (MaxVectorSize == 0) {
    DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
  }

  assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
                                " into one vector!");

  unsigned MaxVF = MaxVectorSize;
  if (MaximizeBandwidth && !OptForSize) {
    // Collect all viable vectorization factors.
    SmallVector<unsigned, 8> VFs;
    unsigned NewMaxVectorSize = WidestRegister / SmallestType;
    for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than existing
    // ones.
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
    for (int i = RUs.size() - 1; i >= 0; --i) {
      if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
        MaxVF = VFs[i];
        break;
      }
    }
  }
  return MaxVF;
}

LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
  float Cost = expectedCost(1).first;
#ifndef NDEBUG
  const float ScalarCost = Cost;
#endif
  unsigned Width = 1;
  DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  // Ignore scalar width, because the user explicitly wants vectorization.
  if (ForceVectorization && MaxVF > 1) {
    Width = 2;
    Cost = expectedCost(Width).first / (float)Width;
  }

  for (unsigned i = 2; i <= MaxVF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
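    //
    // E.g. (illustrative numbers): if the scalar loop costs 8 and the loop at
    // VF = 4 costs 20, the per-iteration vector cost is 20 / 4 = 5, which is
    // cheaper than the scalar cost of 8, so VF = 4 would be preferred.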
    VectorizationCostTy C = expectedCost(i);
    float VectorCost = C.first / (float)i;
    DEBUG(dbgs() << "LV: Vector loop of width " << i
                 << " costs: " << (int)VectorCost << ".\n");
    if (!C.second && !ForceVectorization) {
      DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }
    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }

  DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
        << "LV: Vectorization seems to be not beneficial, "
        << "but was forced by a user.\n");
  DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
  return Factor;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : *BB) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // vectorizable.
      //
      // FIXME: The check here attempts to predict whether a load or store will
      //        be vectorized. We only know this for certain after a VF has
      //        been selected. Here, we assume that if an access can be
      //        vectorized, it will be. We should also look at extending this
      //        optimization to non-pointer types.
      //
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
          !Legal->isAccessInterleaved(&I) && !Legal->isLegalGatherOrScatter(&I))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
                                                           unsigned VF,
                                                           unsigned LoopCost) {

  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop overhead.
  // There are many micro-architectural considerations that we can't predict
  // at this level. For example, frontend pressure (on decode or fetch) due to
  // code size, or the number and capabilities of the execution ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the cross
  // iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  // overhead.
  // 3. We don't interleave if we think that we will spill registers to memory
  // due to the increased register pressure.

  // When we optimize for size, we don't interleave.
  if (OptForSize)
    return 1;

  // We used the distance for the interleave count.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  // Do not interleave loops with a relatively small trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
    return 1;

  unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
  DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
               << " registers\n");

  if (VF == 1) {
    if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumScalarRegs;
  } else {
    if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumVectorRegs;
  }

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
  R.NumInstructions = std::max(R.NumInstructions, 1U);

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want a power of two interleave count to simplify any
  // addressing operations or alignment considerations.
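  //
  // A worked example of the formula (numbers are illustrative): with 16
  // available registers, 2 loop-invariant values, and a maximum local usage
  // of 7 registers, IC = PowerOf2Floor((16 - 2) / 7) = PowerOf2Floor(2) = 2
  // parallel instances.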
  unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
                              R.MaxLocalUsers);

  // Don't count the induction variable as interleaved.
  if (EnableIndVarRegisterHeur)
    IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
                       std::max(1U, (R.MaxLocalUsers - 1)));

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);

  // Check if the user has overridden the max.
  if (VF == 1) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If we did not calculate the cost for VF (because the user selected the VF)
  // then we calculate the cost of VF here.
  if (LoopCost == 0)
    LoopCost = expectedCost(VF).first;

  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target allows.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else if (IC < 1)
    IC = 1;

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF > 1 && Legal->getReductionVars()->size()) {
    DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF == 1 && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead and
  // potentially expose ILP opportunities.
  DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
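    //
    // E.g. (illustrative): with SmallLoopCost = 20 and LoopCost = 5, the
    // small-loop interleave count below is min(IC, PowerOf2Floor(20 / 5)) =
    // min(IC, 4).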
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit it, by default, to 2,
    // so the critical path only gets increased by one reduction operation.
    if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = (Legal->getReductionVars()->size() > 0);
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in a topological order in order
  // to assign a number to each instruction. We use RPO to ensure that defs are
  // met before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi map that
  // holds the list of intervals that *end* at a specific location. This multi
  // map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it in
  // a set. If we find this value in the multi-map then we remove it from the
  // set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but are
  // used inside the loop. We need this number separately from the max-interval
  // usage number because when we unroll, loop-invariant values do not take
  // more register.
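  //
  // A tiny illustration (hypothetical input, for intuition only): in a block
  //   %a = load A; %b = load B; %c = add %a, %b
  // the intervals are a:[0,2] and b:[1,2], so %a is still open while the
  // second load executes, and the maximum number of simultaneously open
  // intervals -- the estimated register usage at VF = 1 -- is one.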
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;
  RU.NumInstructions = 0;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  typedef DenseMap<Instruction *, unsigned> IntervalMap;

  // Maps instruction to its index.
  DenseMap<unsigned, Instruction *> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instruction indices that are used in the loop.
  SmallSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  unsigned Index = 0;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    RU.NumInstructions += BB->size();
    for (Instruction &I : *BB) {
      IdxToInstr[Index++] = &I;

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = Index;
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  typedef SmallVector<Instruction *, 2> InstrList;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallSet<Instruction *, 8> OpenIntervals;

  // Get the size of the widest register.
  unsigned MaxSafeDepDist = -1U;
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
  unsigned WidestRegister =
      std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);

  DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
    if (Ty->isTokenTy())
      return 0U;
    unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
    return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
  };

  for (unsigned int i = 0; i < Index; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      if (VFs[j] == 1) {
        MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
        continue;
      }
      collectUniformsAndScalars(VFs[j]);
      // Count the number of live intervals.
      unsigned RegUsage = 0;
      for (auto Inst : OpenIntervals) {
        // Skip ignored values for VF > 1.
        if (VecValuesToIgnore.count(Inst) ||
            isScalarAfterVectorization(Inst, VFs[j]))
          continue;
        RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
      }
      MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
    }

    DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                 << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    unsigned Invariant = 0;
    if (VFs[i] == 1)
      Invariant = LoopInvariants.size();
    else {
      for (auto Inst : LoopInvariants)
        Invariant += GetRegUsage(Inst->getType(), VFs[i]);
    }

    DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
    DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n');

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {

  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do. Collection may already
  // have occurred if we have a user-selected VF and are now computing the
  // expected cost for interleaving.
  if (VF < 2 || InstsToScalarize.count(VF))
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop and
  // determine if it would be better to not if-convert the blocks they are in.
  // If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!Legal->blockNeedsPredication(BB))
      continue;
    for (Instruction &I : *BB)
      if (Legal->isScalarWithPredication(&I)) {
        ScalarCostsTy ScalarCosts;
        if (computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
          ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());

        // Remember that BB will remain after vectorization.
        PredicatedBBsAfterVectorization.insert(BB);
      }
  }
}

int LoopVectorizationCostModel::computePredInstDiscount(
    Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
    unsigned VF) {

  assert(!isUniformAfterVectorization(PredInst, VF) &&
         "Instruction marked uniform-after-vectorization will be predicated");

  // Initialize the discount to zero, meaning that the scalar version and the
  // vector version cost the same.
  int Discount = 0;
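  //
  // For instance (numbers purely illustrative): if an instruction's vector
  // form costs 10 and its scalarized form costs 6 after scaling by block
  // probability, the accumulated discount grows by 4, indicating that
  // scalarization is the cheaper option.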

  // Holds instructions to analyze. The instructions we visit are mapped in
  // ScalarCosts. Those instructions are the ones that would be scalarized if
  // we find that the scalar version costs less.
  SmallVector<Instruction *, 8> Worklist;

  // Returns true if the given instruction can be scalarized.
  auto canBeScalarized = [&](Instruction *I) -> bool {

    // We only attempt to scalarize instructions forming a single-use chain
    // from the original predicated block that would otherwise be vectorized.
    // Although not strictly necessary, we give up on instructions we know will
    // already be scalar to avoid traversing chains that are unlikely to be
    // beneficial.
    if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
        isScalarAfterVectorization(I, VF))
      return false;

    // If the instruction is scalar with predication, it will be analyzed
    // separately. We ignore it within the context of PredInst.
    if (Legal->isScalarWithPredication(I))
      return false;

    // If any of the instruction's operands are uniform after vectorization,
    // the instruction cannot be scalarized. This prevents, for example, a
    // masked load from being scalarized.
    //
    // We assume we will only emit a value for lane zero of an instruction
    // marked uniform after vectorization, rather than VF identical values.
    // Thus, if we scalarize an instruction that uses a uniform, we would
    // create uses of values corresponding to the lanes we aren't emitting code
    // for. This behavior can be changed by allowing getScalarValue to clone
    // the lane zero values for uniforms rather than asserting.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get()))
        if (isUniformAfterVectorization(J, VF))
          return false;

    // Otherwise, we can scalarize the instruction.
    return true;
  };

  // Returns true if an operand that cannot be scalarized must be extracted
  // from a vector. We will account for this scalarization overhead below. Note
  // that the non-void predicated instructions are placed in their own blocks,
  // and their return values are inserted into vectors. Thus, an extract would
  // still be required.
  auto needsExtract = [&](Instruction *I) -> bool {
    return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF);
  };

  // Compute the expected cost discount from scalarizing the entire expression
  // feeding the predicated instruction. We currently only consider expressions
  // that are single-use instruction chains.
  Worklist.push_back(PredInst);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();

    // If we've already analyzed the instruction, there's nothing to do.
    if (ScalarCosts.count(I))
      continue;

    // Compute the cost of the vector instruction. Note that this cost already
    // includes the scalarization overhead of the predicated instruction.
    unsigned VectorCost = getInstructionCost(I, VF).first;

    // Compute the cost of the scalarized instruction. This cost is the cost of
    // the instruction as if it wasn't if-converted and instead remained in the
    // predicated block. We will scale this cost by block probability after
    // computing the scalarization overhead.
    unsigned ScalarCost = VF * getInstructionCost(I, 1).first;

    // Compute the scalarization overhead of needed insertelement instructions
    // and phi nodes.
    if (Legal->isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
      ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
                                                 true, false);
      ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
    }

    // Compute the scalarization overhead of needed extractelement
    // instructions. For each of the instruction's operands, if the operand can
    // be scalarized, add it to the worklist; otherwise, account for the
    // overhead.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get())) {
        assert(VectorType::isValidElementType(J->getType()) &&
               "Instruction has non-scalar type");
        if (canBeScalarized(J))
          Worklist.push_back(J);
        else if (needsExtract(J))
          ScalarCost += TTI.getScalarizationOverhead(
              ToVectorTy(J->getType(), VF), false, true);
      }

    // Scale the total scalar cost by block probability.
    ScalarCost /= getReciprocalPredBlockProb();

    // Compute the discount. A non-negative discount means the vector version
    // of the instruction costs more, and scalarizing would be beneficial.
    Discount += VectorCost - ScalarCost;
    ScalarCosts[I] = ScalarCost;
  }

  return Discount;
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::expectedCost(unsigned VF) {
  VectorizationCostTy Cost;

  // Collect Uniform and Scalar instructions after vectorization with VF.
  collectUniformsAndScalars(VF);

  // Collect the instructions (and their associated costs) that will be more
  // profitable to scalarize.
  collectInstsToScalarize(VF);

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    VectorizationCostTy BlockCost;

    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      // Skip dbg intrinsics.
      if (isa<DbgInfoIntrinsic>(I))
        continue;

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      VectorizationCostTy C = getInstructionCost(&I, VF);

      // Check if we should override the cost.
      if (ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = ForceTargetInstructionCost;

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
                   << VF << " For instruction: " << I << '\n');
    }

    // If we are vectorizing a predicated block, it will have been
    // if-converted. This means that the block's instructions (aside from
    // stores and instructions that may divide by zero) will now be
    // unconditionally executed. For the scalar case, we may not always execute
    // the predicated block. Thus, scale the block's cost by the probability of
    // executing it.
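    //
    // E.g. (illustrative): if a predicated block's instructions cost 8 in the
    // scalar loop and getReciprocalPredBlockProb() is 2, the block is assumed
    // to execute on about half of the iterations, so its expected cost is 4.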
    if (VF == 1 && Legal->blockNeedsPredication(BB))
      BlockCost.first /= getReciprocalPredBlockProb();

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
  }

  return Cost;
}

/// \brief Gets Address Access SCEV after verifying that the access pattern
/// is loop invariant except the induction variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
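///
/// For example (hypothetical IR, for illustration), the address
///   %gep = getelementptr [1024 x i32], [1024 x i32]* %A, i64 0, i64 %iv
/// qualifies when %A is loop invariant and %iv is an induction variable, so
/// its SCEV can be returned for address-cost estimation.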
static const SCEV *getAddressAccessSCEV(
    Value *Ptr,
    LoopVectorizationLegality *Legal,
    ScalarEvolution *SE,
    const Loop *TheLoop) {
  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return nullptr;

  // We are looking for a gep with all loop invariant indices except for one
  // which should be an induction variable.
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return nullptr;
  }

  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
  return SE->getSCEV(Ptr);
}

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
                                                                 unsigned VF) {
  Type *ValTy = getMemInstValueType(I);
  auto SE = PSE.getSE();

  unsigned Alignment = getMemInstAlignment(I);
  unsigned AS = getMemInstAddressSpace(I);
  Value *Ptr = getPointerOperand(I);
  Type *PtrTy = ToVectorTy(Ptr->getType(), VF);

  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
  const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, SE, TheLoop);

  // Get the cost of the scalar memory instruction and address computation.
  unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);

  Cost += VF *
          TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
                              AS, I);

  // Get the overhead of the extractelement and insertelement instructions
  // we might create due to scalarization.
  Cost += getScalarizationOverhead(I, VF, TTI);

  // If we have a predicated store, it may not be executed for each vector
  // lane. Scale the cost by the probability of executing the predicated
  // block.
  if (Legal->isScalarWithPredication(I))
    Cost /= getReciprocalPredBlockProb();

  return Cost;
}

unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
                                                             unsigned VF) {
  Type *ValTy = getMemInstValueType(I);
  Type *VectorTy = ToVectorTy(ValTy, VF);
  unsigned Alignment = getMemInstAlignment(I);
  Value *Ptr = getPointerOperand(I);
  unsigned AS = getMemInstAddressSpace(I);
  int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);

  assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
         "Stride should be 1 or -1 for consecutive memory access");
  unsigned Cost = 0;
  if (Legal->isMaskRequired(I))
    Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
  else
    Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I);

  bool Reverse = ConsecutiveStride < 0;
  if (Reverse)
    Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
  return Cost;
}

unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
                                                         unsigned VF) {
  LoadInst *LI = cast<LoadInst>(I);
  Type *ValTy = LI->getType();
  Type *VectorTy = ToVectorTy(ValTy, VF);
  unsigned Alignment = LI->getAlignment();
  unsigned AS = LI->getPointerAddressSpace();

  return TTI.getAddressComputationCost(ValTy) +
         TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
         TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
}

unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                          unsigned VF) {
  Type *ValTy = getMemInstValueType(I);
  Type *VectorTy = ToVectorTy(ValTy, VF);
  unsigned Alignment = getMemInstAlignment(I);
  Value *Ptr = getPointerOperand(I);

  return TTI.getAddressComputationCost(VectorTy) +
         TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
                                    Legal->isMaskRequired(I), Alignment);
}

unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
                                                            unsigned VF) {
  Type *ValTy = getMemInstValueType(I);
  Type *VectorTy = ToVectorTy(ValTy, VF);
  unsigned AS = getMemInstAddressSpace(I);

  auto Group = Legal->getInterleavedAccessGroup(I);
  assert(Group && "Fail to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();
  Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
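  // E.g. (illustrative): a factor-2 group of i32 accesses at VF = 4 is costed
  // as a single wide operation on <8 x i32>, plus the shuffles needed to
  // (de)interleave the two member vectors.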
  // Holds the indices of existing members in an interleaved load group.
  // An interleaved store group doesn't need this as it doesn't allow gaps.
  SmallVector<unsigned, 4> Indices;
  if (isa<LoadInst>(I)) {
    for (unsigned i = 0; i < InterleaveFactor; i++)
      if (Group->getMember(i))
        Indices.push_back(i);
  }

  // Calculate the cost of the whole interleaved group.
  unsigned Cost = TTI.getInterleavedMemoryOpCost(I->getOpcode(), WideVecTy,
                                                 Group->getFactor(), Indices,
                                                 Group->getAlignment(), AS);

  if (Group->isReverse())
    Cost += Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
  return Cost;
}

unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
                                                              unsigned VF) {
  // Calculate the scalar cost only. The vectorization cost should be ready at
  // this moment.
  if (VF == 1) {
    Type *ValTy = getMemInstValueType(I);
    unsigned Alignment = getMemInstAlignment(I);
    unsigned AS = getMemInstAddressSpace(I);

    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
  }
  return getWideningCost(I, VF);
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (isUniformAfterVectorization(I, VF))
    VF = 1;

  if (VF > 1 && isProfitableToScalarize(I, VF))
    return VectorizationCostTy(InstsToScalarize[VF][I], false);

  Type *VectorTy;
  unsigned C = getInstructionCost(I, VF, VectorTy);

  bool TypeNotScalarized =
      VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF;
  return VectorizationCostTy(C, TypeNotScalarized);
}

void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
  if (VF == 1)
    return;
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      Value *Ptr = getPointerOperand(&I);
      if (!Ptr)
        continue;

      if (isa<LoadInst>(&I) && Legal->isUniform(Ptr)) {
        // Scalar load + broadcast.
        unsigned Cost = getUniformMemOpCost(&I, VF);
        setWideningDecision(&I, VF, CM_Scalarize, Cost);
        continue;
      }

      // We assume that widening is the best solution when possible.
      if (Legal->memoryInstructionCanBeWidened(&I, VF)) {
        unsigned Cost = getConsecutiveMemOpCost(&I, VF);
        setWideningDecision(&I, VF, CM_Widen, Cost);
        continue;
      }

      // Choose between Interleaving, Gather/Scatter or Scalarization.
      unsigned InterleaveCost = UINT_MAX;
      unsigned NumAccesses = 1;
      if (Legal->isAccessInterleaved(&I)) {
        auto Group = Legal->getInterleavedAccessGroup(&I);
        assert(Group && "Fail to get an interleaved access group.");

        // Make one decision for the whole group.
        if (getWideningDecision(&I, VF) != CM_Unknown)
          continue;

        NumAccesses = Group->getNumMembers();
        InterleaveCost = getInterleaveGroupCost(&I, VF);
      }

      unsigned GatherScatterCost =
          Legal->isLegalGatherOrScatter(&I)
              ? getGatherScatterCost(&I, VF) * NumAccesses
              : UINT_MAX;

      unsigned ScalarizationCost =
          getMemInstScalarizationCost(&I, VF) * NumAccesses;

      // Choose the better solution for the current VF, write down this
      // decision, and use it during vectorization.
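      //
      // E.g. (illustrative costs): with InterleaveCost = 4, GatherScatterCost
      // = 8, and ScalarizationCost = 12, the decision below is CM_Interleave
      // with cost 4.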
      unsigned Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
      if (auto Group = Legal->getInterleavedAccessGroup(&I))
        setWideningDecision(Group, VF, Decision, Cost);
      else
        setWideningDecision(&I, VF, Decision, Cost);
    }
  }
}

7275 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7278 Type *RetTy = I->getType();
7279 if (canTruncateToMinimalBitwidth(I, VF))
7280 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7281 VectorTy = ToVectorTy(RetTy, VF);
7282 auto SE = PSE.getSE();
7284 // TODO: We need to estimate the cost of intrinsic calls.
7285 switch (I->getOpcode()) {
7286 case Instruction::GetElementPtr:
7287 // We mark this instruction as zero-cost because the cost of GEPs in
7288 // vectorized code depends on whether the corresponding memory instruction
7289 // is scalarized or not. Therefore, we handle GEPs with the memory
7290 // instruction cost.
7292 case Instruction::Br: {
7293 // In cases of scalarized and predicated instructions, there will be VF
7294 // predicated blocks in the vectorized loop. Each branch around these
7295 // blocks requires also an extract of its vector compare i1 element.
7296 bool ScalarPredicatedBB = false;
7297 BranchInst *BI = cast<BranchInst>(I);
7298 if (VF > 1 && BI->isConditional() &&
7299 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7300 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7301 ScalarPredicatedBB = true;
7303 if (ScalarPredicatedBB) {
7304 // Return cost for branches around scalarized and predicated blocks.
7306 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7307 return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) +
7308 (TTI.getCFInstrCost(Instruction::Br) * VF));
7309 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
7310 // The back-edge branch will remain, as will all scalar branches.
7311 return TTI.getCFInstrCost(Instruction::Br);
7313 // This branch will be eliminated by if-conversion.
7315 // Note: We currently assume zero cost for an unconditional branch inside
7316 // a predicated block since it will become a fall-through, although we
7317 // may decide in the future to call TTI for all branches.
7319 case Instruction::PHI: {
7320 auto *Phi = cast<PHINode>(I);
7322 // First-order recurrences are replaced by vector shuffles inside the loop.
7323 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
7324 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
7325 VectorTy, VF - 1, VectorTy);
7327 // TODO: IF-converted IFs become selects.
7330 case Instruction::UDiv:
7331 case Instruction::SDiv:
7332 case Instruction::URem:
7333 case Instruction::SRem:
7334 // If we have a predicated instruction, it may not be executed for each
7335 // vector lane. Get the scalarization cost and scale this amount by the
7336 // probability of executing the predicated block. If the instruction is not
7337 // predicated, we fall through to the next case.
7338 if (VF > 1 && Legal->isScalarWithPredication(I)) {
7341 // These instructions have a non-void type, so account for the phi nodes
7342 // that we will create. This cost is likely to be zero. The phi node
7343 // cost, if any, should be scaled by the block probability because it
7344 // models a copy at the end of each predicated block.
7345 Cost += VF * TTI.getCFInstrCost(Instruction::PHI);
7347 // The cost of the non-predicated instruction.
7348 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);
7350 // The cost of insertelement and extractelement instructions needed for
7352 Cost += getScalarizationOverhead(I, VF, TTI);
7354 // Scale the cost by the probability of executing the predicated blocks.
7355 // This assumes the predicated block for each vector lane is equally
7357 return Cost / getReciprocalPredBlockProb();
7359 case Instruction::Add:
7360 case Instruction::FAdd:
7361 case Instruction::Sub:
7362 case Instruction::FSub:
7363 case Instruction::Mul:
7364 case Instruction::FMul:
7365 case Instruction::FDiv:
7366 case Instruction::FRem:
7367 case Instruction::Shl:
7368 case Instruction::LShr:
7369 case Instruction::AShr:
7370 case Instruction::And:
7371 case Instruction::Or:
7372 case Instruction::Xor: {
7373 // Since we will replace the stride by 1 the multiplication should go away.
7374 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7376 // Certain instructions can be cheaper to vectorize if they have a constant
7377 // second vector operand. One example of this are shifts on x86.
7378 TargetTransformInfo::OperandValueKind Op1VK =
7379 TargetTransformInfo::OK_AnyValue;
7380 TargetTransformInfo::OperandValueKind Op2VK =
7381 TargetTransformInfo::OK_AnyValue;
7382 TargetTransformInfo::OperandValueProperties Op1VP =
7383 TargetTransformInfo::OP_None;
7384 TargetTransformInfo::OperandValueProperties Op2VP =
7385 TargetTransformInfo::OP_None;
7386 Value *Op2 = I->getOperand(1);
7388 // Check for a splat or for a non uniform vector of constants.
7389 if (isa<ConstantInt>(Op2)) {
7390 ConstantInt *CInt = cast<ConstantInt>(Op2);
7391 if (CInt && CInt->getValue().isPowerOf2())
7392 Op2VP = TargetTransformInfo::OP_PowerOf2;
7393 Op2VK = TargetTransformInfo::OK_UniformConstantValue;
7394 } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
7395 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
7396 Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
7398 ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
7399 if (CInt && CInt->getValue().isPowerOf2())
7400 Op2VP = TargetTransformInfo::OP_PowerOf2;
7401 Op2VK = TargetTransformInfo::OK_UniformConstantValue;
7403 } else if (Legal->isUniform(Op2)) {
7404 Op2VK = TargetTransformInfo::OK_UniformValue;
7406 SmallVector<const Value *, 4> Operands(I->operand_values());
7407 return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK,
7408 Op2VK, Op1VP, Op2VP, Operands);
7410 case Instruction::Select: {
7411 SelectInst *SI = cast<SelectInst>(I);
7412 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7413 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7414 Type *CondTy = SI->getCondition()->getType();
7416 CondTy = VectorType::get(CondTy, VF);
7418 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I);
7420 case Instruction::ICmp:
7421 case Instruction::FCmp: {
7422 Type *ValTy = I->getOperand(0)->getType();
7423 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7424 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7425 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7426 VectorTy = ToVectorTy(ValTy, VF);
7427 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I);
7429 case Instruction::Store:
7430 case Instruction::Load: {
7431 VectorTy = ToVectorTy(getMemInstValueType(I), VF);
7432 return getMemoryInstructionCost(I, VF);
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // We optimize the truncation of induction variables having constant
    // integer steps. The cost of these truncations is the same as the scalar
    // operation.
    if (isOptimizableIVTruncate(I, VF)) {
      auto *Trunc = cast<TruncInst>(I);
      return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
                                  Trunc->getSrcTy(), Trunc);
    }

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
    if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (I->getOpcode() == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (I->getOpcode() == Instruction::ZExt ||
                 I->getOpcode() == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI))
      return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
    return CallCost;
  }
  default:
    // The cost of executing VF copies of the scalar instruction. This opcode
    // is unknown. Assume that it is the same as 'mul'.
    return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
           getScalarizationOverhead(I, VF, TTI);
  } // end of switch.
}

char LoopVectorize::ID = 0;
static const char lv_name[] = "Loop Vectorization";
INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

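// Factory for the legacy pass. NoUnrolling feeds the DisableUnrolling flag
// consulted when the hints are read in processLoop(), and AlwaysVectorize
// controls whether loops without an explicit vectorization hint are still
// considered (see Hints.allowVectorization below).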
namespace llvm {
Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
  return new LoopVectorize(NoUnrolling, AlwaysVectorize);
}
} // end namespace llvm

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check if the pointer operand of a load or store instruction is
  // consecutive.
  if (auto *Ptr = getPointerOperand(Inst))
    return Legal->isConsecutivePtr(Ptr);
  return false;
}

void LoopVectorizationCostModel::collectValuesToIgnore() {
  // Ignore ephemeral values.
  CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);

  // Ignore type-promoting instructions we identified during reduction
  // detection.
  for (auto &Reduction : *Legal->getReductionVars()) {
    RecurrenceDescriptor &RedDes = Reduction.second;
    SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }
}

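// The planner contract used by processLoop() below: plan() returns a
// {Width, Cost} pair in which Width == 1 means "do not vectorize", and a
// non-zero UserVF bypasses the cost model's own factor selection.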
LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) {
  // Width 1 means no vectorization, cost 0 means uncomputed cost.
  const LoopVectorizationCostModel::VectorizationFactor NoVectorization = {1U,
                                                                           0U};
  Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize);
  if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize.
    return NoVectorization;

  if (UserVF) {
    DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
    assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    CM.selectUserVectorizationFactor(UserVF);
    return {UserVF, 0};
  }

  unsigned MaxVF = MaybeMaxVF.getValue();
  assert(MaxVF != 0 && "MaxVF is zero.");
  if (MaxVF == 1)
    return NoVectorization;

  // Select the optimal vectorization factor.
  return CM.selectVectorizationFactor(MaxVF);
}

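// With VF == 1, the InnerLoopUnroller reuses the vectorization machinery to
// interleave scalar iterations; memory instructions are never widened here,
// only replicated once per unrolled part.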
void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
  auto *SI = dyn_cast<StoreInst>(Instr);
  bool IfPredicateInstr = (SI && Legal->blockNeedsPredication(SI->getParent()));

  return scalarizeInstruction(Instr, IfPredicateInstr);
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

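// For the unroller, a "step vector" degenerates to a single scalar. For
// example, with Val = %iv, StartIdx = 2, and Step = %stride, this emits
// roughly
//   %induction = add %iv, (2 * %stride)
// i.e., the induction value for the third unrolled part.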
Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
  Type *Ty = Val->getType();
  assert(!Ty->isVectorTy() && "Val must be a scalar");

  if (Ty->isFloatingPointTy()) {
    Constant *C = ConstantFP::get(Ty, (double)StartIdx);

    // Floating point operations had to be 'fast' to enable the unrolling.
    Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
    return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
  }
  Constant *C = ConstantInt::get(Ty, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}

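// Attach metadata telling later passes not to runtime-unroll the scalar
// remainder loop. The updated loop ID has roughly the form
//   !0 = distinct !{!0, ..., !1}
//   !1 = !{!"llvm.loop.unroll.runtime.disable"}
// unless an unconditional llvm.loop.unroll.disable is already present.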
static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata =
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

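// Drive the full decision pipeline for a single inner loop: read the hints,
// check legality, run the cost model through the planner, and then either
// vectorize, interleave, or bail out with a remark explaining the decision.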
bool LoopVectorizePass::processLoop(Loop *L) {
  assert(L->empty() && "Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  DEBUG(dbgs() << "\nLV: Checking a loop in \""
               << L->getHeader()->getParent()->getName() << "\" from "
               << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);

  DEBUG(dbgs() << "LV: Loop hints:"
               << " force="
               << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                       ? "disabled"
                       : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                              ? "enabled"
                              : "?"))
               << " width=" << Hints.getWidth()
               << " unroll=" << Hints.getInterleave() << "\n");

  // Function containing loop
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
    DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check the loop for a trip count threshold:
  // do not vectorize loops with a tiny trip count.
  const unsigned MaxTC = SE->getSmallConstantMaxTripCount(L);
  if (MaxTC > 0u && MaxTC < TinyTripCountVectorThreshold) {
    DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                 << "This loop is not worth vectorizing.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      DEBUG(dbgs() << "\n");
      ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                     "NotBeneficial", L)
                << "vectorization is not beneficial "
                   "and is not explicitly forced");
      return false;
    }
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
                                &Requirements, &Hints);
  if (!LVL.canVectorize()) {
    DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check the function attributes to find out if this function should be
  // optimized for size.
  bool OptForSize =
      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

  // Compute the weighted frequency of this loop being executed and see if it
  // is less than 20% of the function entry baseline frequency. Note that we
  // always have a canonical loop here because we think we *can* vectorize.
  // FIXME: This is hidden behind a flag due to pervasive problems with
  // exactly what block frequency models.
  if (LoopVectorizeWithBlockFrequency) {
    BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
    if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
        LoopEntryFreq < ColdEntryFreq)
      OptForSize = true;
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem correct -- what if the loop is an integer
  // loop and the vector instructions selected are purely integer vector
  // instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat "
                    "attribute is used.\n");
    ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                   "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(CM);

  // Get user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize; return the best VF and its cost.
  LoopVectorizationCostModel::VectorizationFactor VF =
      LVP.plan(OptForSize, UserVF);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get user interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                    "requirements.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  if (VF.Width == 1) {
    DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    DEBUG(dbgs()
          << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit(OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                       L->getStartLoc(), L->getHeader())
              << VecDiagMsg.second);
    ORE->emit(OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                       L->getStartLoc(), L->getHeader())
              << IntDiagMsg.second);
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << VecDiagMsg.second);
  } else if (VectorizeLoop && !InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << IntDiagMsg.second);
  } else if (VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  using namespace ore;
  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not legal to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    Unroller.vectorize();

    ORE->emit(OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                 L->getHeader())
              << "interleaved loop (interleaved count: "
              << NV("InterleaveCount", IC) << ")");
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LB.vectorize();
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling a scalar loop when there are
    // no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      AddRuntimeUnrollDisableMetaData(L);

    // Report the vectorization decision.
    ORE->emit(OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                 L->getHeader())
              << "vectorized loop (vectorization width: "
              << NV("VectorizationFactor", VF.Width)
              << ", interleaved count: " << NV("InterleaveCount", IC) << ")");
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();

  DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

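// Shared implementation behind both the legacy pass and the new pass
// manager's run() method: cache the per-function analyses in members, then
// simplify and visit each inner loop.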
bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {
  // Stash the analyses in members so processLoop() can reach them.
  SE = &SE_; LI = &LI_; TTI = &TTI_; DT = &DT_;
  BFI = &BFI_; TLI = TLI_; AA = &AA_; AC = &AC_;
  GetLAA = &GetLAA_; DB = &DB_; ORE = &ORE_;

  // Compute some weights outside of the loop over the loops. Compute this
  // using a BranchProbability to re-use its scaling math.
  const BranchProbability ColdProb(1, 5); // 20%
  ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  bool Changed = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    addAcyclicInnerLoop(*L, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= processLoop(L);
  }

  // Process each loop nest in the function.
  return Changed;
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

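  // LoopAccessAnalysis is a loop-level analysis in the new pass manager, so
  // wrap its lookup in a callback that runImpl can invoke lazily for each
  // loop it visits.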
  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}