//===- llvm/Analysis/VectorUtils.h - Vector utilities -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VECTORUTILS_H
#define LLVM_ANALYSIS_VECTORUTILS_H

#include "llvm/ADT/MapVector.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/IRBuilder.h"

namespace llvm {

template <typename T> class ArrayRef;
class DemandedBits;
class GetElementPtrInst;
template <typename InstTy> class InterleaveGroup;
class Loop;
class ScalarEvolution;
class TargetTransformInfo;
class Type;
class Value;

namespace Intrinsic {
enum ID : unsigned;
}

/// Identify if the intrinsic is trivially vectorizable.
/// This method returns true if the intrinsic's argument types are all
/// scalars for the scalar form of the intrinsic and all vectors for
/// the vector form of the intrinsic.
bool isTriviallyVectorizable(Intrinsic::ID ID);

/// Identifies if the intrinsic has a scalar operand. It checks for the
/// ctlz, cttz, and powi intrinsics, which take a scalar operand even in
/// their vector form.
bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx);

/// Returns the intrinsic ID for a call.
/// For the input call instruction it finds the mapping intrinsic and returns
/// its intrinsic ID; if no mapping is found, it returns not_intrinsic.
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI,
                                          const TargetLibraryInfo *TLI);
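
// A minimal usage sketch (illustrative only; `CI` and `TLI` are assumed to be
// supplied by the caller):
//
//   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
//   if (ID != Intrinsic::not_intrinsic && isTriviallyVectorizable(ID)) {
//     // The call maps to an intrinsic whose vector form takes vector
//     // arguments wherever the scalar form takes scalars, so it can be
//     // widened lane-wise.
//   }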

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned getGEPInductionOperand(const GetElementPtrInst *Gep);

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp);

/// If a value has only one user that is a CastInst, return it.
Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty);

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
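
// An illustrative sketch (assumes `Ptr`, `SE`, and `Lp` come from the caller):
//
//   if (Value *StrideVal = getStrideFromPointer(Ptr, SE, Lp)) {
//     // The access is strided by the symbolic value StrideVal. Versioning
//     // the loop with a runtime check that StrideVal == 1 can turn it into
//     // a unit-stride (consecutive) access.
//   }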

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it was inserted and then
/// extracted from the vector.
Value *findScalarElement(Value *V, unsigned EltNo);

/// Get the splat value if the input is a splat vector, or return nullptr.
/// The value may be extracted from a splat constant vector or from
/// a sequence of instructions that broadcast a single value into a vector.
const Value *getSplatValue(const Value *V);
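
// An illustrative sketch (assumes `Vec` is a vector-typed Value available in
// the caller):
//
//   Value *Lane0 = findScalarElement(Vec, /*EltNo=*/0); // May be nullptr.
//   if (const Value *Splat = getSplatValue(Vec)) {
//     // Every lane of Vec is known to hold the single scalar value Splat.
//   }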

/// Compute a map of integer instructions to their minimum legal type
/// sizes.
///
/// C semantics force sub-int-sized values (e.g. i8, i16) to be promoted to int
/// type (e.g. i32) whenever arithmetic is performed on them.
///
/// For targets with native i8 or i16 operations, usually InstCombine can shrink
/// the arithmetic type down again. However InstCombine refuses to create
/// illegal types, so for targets without i8 or i16 registers, the lengthening
/// and shrinking remains.
///
/// Most SIMD ISAs (e.g. NEON) however support vectors of i8 or i16 even when
/// their scalar equivalents do not, so during vectorization it is important to
/// remove these lengthens and truncates when deciding the profitability of
/// vectorization.
///
/// This function analyzes the given range of instructions and determines the
/// minimum type size each can be converted to. It attempts to remove or
/// minimize type size changes across each def-use chain, so for example in the
/// following code:
///
///   %1 = load i8, i8*
///   %2 = add i8 %1, 2
///   %3 = load i16, i16*
///   %4 = zext i8 %2 to i32
///   %5 = zext i16 %3 to i32
///   %6 = add i32 %4, %5
///   %7 = trunc i32 %6 to i16
///
/// Instruction %6 must be done at least in i16, so computeMinimumValueSizes
/// will return: {%1: 16, %2: 16, %3: 16, %4: 16, %5: 16, %6: 16, %7: 16}.
///
/// If the optional TargetTransformInfo is provided, this function tries harder
/// to do less work by only looking at illegal types.
MapVector<Instruction*, uint64_t>
computeMinimumValueSizes(ArrayRef<BasicBlock*> Blocks,
                         DemandedBits &DB,
                         const TargetTransformInfo *TTI=nullptr);
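
// A usage sketch (illustrative only; `TheLoop`, the DemandedBits analysis
// `DB`, and the `TTI` pointer are assumed to be available in the caller):
//
//   MapVector<Instruction *, uint64_t> MinBWs =
//       computeMinimumValueSizes(TheLoop->getBlocks(), DB, TTI);
//   // MinBWs[I] is the smallest bit width instruction I needs to be
//   // computed in.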

/// Compute the union of two access-group lists.
///
/// If the list contains just one access group, it is returned directly. If the
/// list is empty, returns nullptr.
MDNode *uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2);

/// Compute the access-group list of access groups that @p Inst1 and @p Inst2
/// are both in. If either instruction does not access memory at all, it is
/// considered to be in every list.
///
/// If the list contains just one access group, it is returned directly. If the
/// list is empty, returns nullptr.
MDNode *intersectAccessGroups(const Instruction *Inst1,
                              const Instruction *Inst2);

/// Propagate metadata from the instructions in \p VL onto \p I.
///
/// Specifically, let Kinds = [MD_tbaa, MD_alias_scope, MD_noalias, MD_fpmath,
/// MD_nontemporal, MD_access_group].
/// For K in Kinds, we get the MDNode for K from each of the
/// elements of VL, compute their "intersection" (i.e., the most generic
/// metadata value that covers all of the individual values), and set I's
/// metadata for K equal to the intersection value.
///
/// This function always sets a (possibly null) value for each K in Kinds.
Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL);
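
// An illustrative sketch (assumes `WideLoad` is the new vector load and
// `Load0`..`Load3` are the scalar loads it replaces):
//
//   SmallVector<Value *, 4> Scalars = {Load0, Load1, Load2, Load3};
//   propagateMetadata(WideLoad, Scalars);
//   // WideLoad now carries the common (intersected) metadata of the group.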

/// Create a mask that filters the members of an interleave group where there
/// are gaps.
///
/// For example, the mask for \p Group with interleave factor 3
/// and \p VF 4, that has only its first member present, is:
///
///   <1,0,0,1,0,0,1,0,0,1,0,0>
///
/// Note: The result is a mask of 0's and 1's, as opposed to the other
/// create[*]Mask() utilities which create a shuffle mask (mask that
/// consists of indices).
Constant *createBitMaskForGaps(IRBuilder<> &Builder, unsigned VF,
                               const InterleaveGroup<Instruction> &Group);

/// Create a mask with replicated elements.
///
/// This function creates a shuffle mask for replicating each of the \p VF
/// elements in a vector \p ReplicationFactor times. It can be used to
/// transform a mask of \p VF elements into a mask of
/// \p VF * \p ReplicationFactor elements used by a predicated
/// interleaved-group of loads/stores whose interleave factor ==
/// \p ReplicationFactor.
///
/// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
///
///   <0,0,0,1,1,1,2,2,2,3,3,3>
Constant *createReplicatedMask(IRBuilder<> &Builder, unsigned ReplicationFactor,
                               unsigned VF);

/// Create an interleave shuffle mask.
///
/// This function creates a shuffle mask for interleaving \p NumVecs vectors of
/// vectorization factor \p VF into a single wide vector. The mask is of the
/// form:
///
///   <0, VF, VF * 2, ..., VF * (NumVecs - 1), 1, VF + 1, VF * 2 + 1, ...>
///
/// For example, the mask for VF = 4 and NumVecs = 2 is:
///
///   <0, 4, 1, 5, 2, 6, 3, 7>.
Constant *createInterleaveMask(IRBuilder<> &Builder, unsigned VF,
                               unsigned NumVecs);
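
// An illustrative sketch (assumes `Builder` is an IRBuilder<> and `V0`, `V1`
// are <4 x i32> values supplied by the caller):
//
//   Constant *IMask = createInterleaveMask(Builder, /*VF=*/4, /*NumVecs=*/2);
//   Value *Interleaved = Builder.CreateShuffleVector(V0, V1, IMask);
//   // Interleaved = <V0[0], V1[0], V0[1], V1[1], V0[2], V1[2], V0[3], V1[3]>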

/// Create a stride shuffle mask.
///
/// This function creates a shuffle mask whose elements begin at \p Start and
/// are incremented by \p Stride. The mask can be used to deinterleave an
/// interleaved vector into separate vectors of vectorization factor \p VF. The
/// mask is of the form:
///
///   <Start, Start + Stride, ..., Start + Stride * (VF - 1)>
///
/// For example, the mask for Start = 0, Stride = 2, and VF = 4 is:
///
///   <0, 2, 4, 6>
Constant *createStrideMask(IRBuilder<> &Builder, unsigned Start,
                           unsigned Stride, unsigned VF);
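
// An illustrative deinterleave sketch (assumes `Wide` is an <8 x i32> value
// holding two interleaved sequences and `Builder` is an IRBuilder<>):
//
//   Constant *EvenMask = createStrideMask(Builder, /*Start=*/0, /*Stride=*/2,
//                                         /*VF=*/4);
//   Value *Even = Builder.CreateShuffleVector(
//       Wide, UndefValue::get(Wide->getType()), EvenMask);
//   // Even = <Wide[0], Wide[2], Wide[4], Wide[6]>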

/// Create a sequential shuffle mask.
///
/// This function creates a shuffle mask whose elements are sequential and
/// begin at \p Start. The mask contains \p NumInts integers and is padded with
/// \p NumUndefs undef values. The mask is of the form:
///
///   <Start, Start + 1, ... Start + NumInts - 1, undef_1, ... undef_NumUndefs>
///
/// For example, the mask for Start = 0, NumInts = 4, and NumUndefs = 4 is:
///
///   <0, 1, 2, 3, undef, undef, undef, undef>
Constant *createSequentialMask(IRBuilder<> &Builder, unsigned Start,
                               unsigned NumInts, unsigned NumUndefs);
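
// An illustrative sketch that extracts the low half of a vector (assumes
// `Wide` is an <8 x i32> value and `Builder` is an IRBuilder<>):
//
//   Constant *LoMask = createSequentialMask(Builder, /*Start=*/0,
//                                           /*NumInts=*/4, /*NumUndefs=*/0);
//   Value *Lo = Builder.CreateShuffleVector(
//       Wide, UndefValue::get(Wide->getType()), LoMask);
//   // Lo = <Wide[0], Wide[1], Wide[2], Wide[3]>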

/// Concatenate a list of vectors.
///
/// This function generates code that concatenates the vectors in \p Vecs into
/// a single large vector. The number of vectors should be greater than one,
/// and their element types should be the same. The number of elements in the
/// vectors should also be the same; however, if the last vector has fewer
/// elements, it will be padded with undefs.
Value *concatenateVectors(IRBuilder<> &Builder, ArrayRef<Value *> Vecs);
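
// An illustrative sketch (assumes `VecA` and `VecB` are <4 x float> values):
//
//   Value *Wide = concatenateVectors(Builder, {VecA, VecB});
//   // Wide is an <8 x float> holding the lanes of VecA followed by VecB.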

/// The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
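///
/// A hypothetical query sketch for a group `G` of type
/// InterleaveGroup<Instruction> * produced by the interleaved-access analysis
/// (illustrative only):
/// \code
///   // Walk the members; for a load group, gaps yield nullptr.
///   for (unsigned I = 0; I < G->getFactor(); ++I)
///     if (Instruction *Member = G->getMember(I))
///       assert(G->getIndex(Member) == I);
/// \endcode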
template <typename InstTy> class InterleaveGroup {
public:
  InterleaveGroup(unsigned Factor, bool Reverse, unsigned Align)
      : Factor(Factor), Reverse(Reverse), Align(Align), InsertPos(nullptr) {}

  InterleaveGroup(InstTy *Instr, int Stride, unsigned Align)
      : Align(Align), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader and it could
  /// be negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(InstTy *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.find(Key) != Members.end())
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The distance between the largest and smallest keys must stay below
      // the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  InstTy *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    auto Member = Members.find(Key);
    if (Member == Members.end())
      return nullptr;

    return Member->second;
  }

  /// Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(const InstTy *Instr) const {
    for (auto I : Members) {
      if (I.second == Instr)
        return I.first - SmallestKey;
    }

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  InstTy *getInsertPos() const { return InsertPos; }
  void setInsertPos(InstTy *Inst) { InsertPos = Inst; }

  /// Add metadata (e.g. alias info) from the instructions in this group to \p
  /// NewInst.
  ///
  /// FIXME: this function currently does not add noalias metadata a la
  /// addNewMetadata. To do that we need to compute the intersection of the
  /// noalias info from all members.
  void addMetadata(InstTy *NewInst) const;

  /// Returns true if this Group requires a scalar iteration to handle gaps.
  bool requiresScalarEpilogue() const {
    // If the last member of the Group exists, then a scalar epilogue is not
    // needed for this group.
    if (getMember(getFactor() - 1))
      return false;

    // We have a group with gaps. It therefore cannot be a group of stores,
    // and it can't be a reversed access, because such groups get invalidated.
    assert(!getMember(0)->mayWriteToMemory() &&
           "Group should have been invalidated");
    assert(!isReverse() && "Group should have been invalidated");

    // This is a group of loads, with gaps, and without a last member.
    return true;
  }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, InstTy *> Members;
  int SmallestKey = 0;
  int LargestKey = 0;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32             // Insert Position
  //      %add = add i32 %even         // Use of %even
  //      %store = store i32 %even
  //      %odd = load i32
  //      %odd = add i32               // Def of %odd
  //      store i32 %odd               // Insert Position
  InstTy *InsertPos;
};

/// Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do the analysis, as vectorization
/// of interleaved accesses would be unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between each member and its group in a map.
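///
/// A usage sketch (illustrative only; the analyses passed to the constructor
/// and `SomeMemInstr` are assumed to be available in the caller):
/// \code
///   InterleavedAccessInfo IAI(PSE, TheLoop, DT, LI, LAI);
///   IAI.analyzeInterleaving(/*EnableMaskedInterleavedGroup=*/false);
///   InterleaveGroup<Instruction> *G = IAI.getInterleaveGroup(SomeMemInstr);
///   if (G && G->requiresScalarEpilogue()) {
///     // Keep a scalar tail iteration to cover the group's gaps.
///   }
/// \endcode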
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI,
                        const LoopAccessInfo *LAI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(LAI) {}

  ~InterleavedAccessInfo() { reset(); }

  /// Analyze the interleaved accesses and collect them in interleave
  /// groups. Consider also predicated loads/stores in the analysis if
  /// \p EnableMaskedInterleavedGroup is true.
  void analyzeInterleaving(bool EnableMaskedInterleavedGroup);

  /// Invalidate groups, e.g., in case all blocks in the loop will be
  /// predicated contrary to the original assumption. Although we currently
  /// prevent group formation for predicated accesses, we may be able to relax
  /// this limitation in the future once we handle more complicated blocks.
  void reset() {
    SmallPtrSet<InterleaveGroup<Instruction> *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
    InterleaveGroupMap.clear();
    RequiresScalarEpilogue = false;
  }

  /// Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.find(Instr) != InterleaveGroupMap.end();
  }

  /// Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr does not belong to any group.
  InterleaveGroup<Instruction> *
  getInterleaveGroup(const Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  iterator_range<SmallPtrSetIterator<llvm::InterleaveGroup<Instruction> *>>
  getInterleaveGroups() {
    return make_range(InterleaveGroups.begin(), InterleaveGroups.end());
  }

  /// Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// Invalidate groups that require a scalar epilogue (due to gaps). This can
  /// happen when optimizing for size forbids a scalar epilogue, and the gap
  /// cannot be filtered by masking the load/store.
  void invalidateGroupsRequiringScalarEpilogue();

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;

  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue = false;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup<Instruction> *> InterleaveGroupMap;

  SmallPtrSet<InterleaveGroup<Instruction> *, 4> InterleaveGroups;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor() = default;
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;

    // The scalar expression of this access.
    const SCEV *Scev = nullptr;

    // The size of the memory object.
    uint64_t Size = 0;

    // The alignment of this access.
    unsigned Align = 0;
  };

  /// A type for holding instructions and their stride descriptors.
  using StrideEntry = std::pair<Instruction *, StrideDescriptor>;

  /// Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup<Instruction> *
  createInterleaveGroup(Instruction *Instr, int Stride, unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] =
        new InterleaveGroup<Instruction>(Instr, Stride, Align);
    InterleaveGroups.insert(InterleaveGroupMap[Instr]);
    return InterleaveGroupMap[Instr];
  }

  /// Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup<Instruction> *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    InterleaveGroups.erase(Group);
    delete Group;
  }

  /// Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride);

  /// Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering is
  /// not necessary or is prevented because \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {
    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return Dependences.find(Src) == Dependences.end() ||
           !Dependences.lookup(Src).count(Sink);
  }

  /// Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

} // llvm namespace

#endif