//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;
#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;
/// The maximum iterations used to merge memory checks
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));
/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));
bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

Value *llvm::stripIntegerCast(Value *V) {
  if (auto *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}
const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr, Value *OrigPtr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI != PtrToStride.end()) {
    Value *StrideVal = SI->second;

    // Strip casts.
    StrideVal = stripIntegerCast(StrideVal);

    ScalarEvolution *SE = PSE.getSE();
    const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
    const auto *CT =
        static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));

    PSE.addPredicate(*SE->getEqualPredicate(U, CT));
    auto *Expr = PSE.getSCEV(Ptr);

    LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                      << " by: " << *Expr << "\n");
    return Expr;
  }

  // Otherwise, just return the SCEV of the original pointer.
  return OrigSCEV;
}
RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, RuntimePointerChecking &RtCheck)
    : RtCheck(RtCheck), High(RtCheck.Pointers[Index].End),
      Low(RtCheck.Pointers[Index].Start) {
  Members.push_back(Index);
}
/// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on N-th loop
/// iteration. Then B is calculated as:
///   B = A + Step*N .
/// Step value may be positive or negative.
/// N is a calculated back-edge taken count:
///     N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
/// Start and End points are calculated in the following way:
/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
/// where SizeOfElt is the size of single memory access in bytes.
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
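///
/// Illustrative example (hypothetical values, added for exposition): with
/// 4-byte elements, Step = 4, A = %a and N = 99, we get B = %a + 396, so
/// Start = %a and End = %a + 396 + 4 = %a + 400. Two such accesses can
/// only conflict if their [Start, End) intervals overlap.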
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    const ValueToValueMap &Strides,
                                    PredicatedScalarEvolution &PSE) {
  // Get the stride-replaced SCEV.
  const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
  ScalarEvolution *SE = PSE.getSE();

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(Sc, Lp))
    ScStart = ScEnd = Sc;
  else {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
    assert(AR && "Invalid addrec expression");
    const SCEV *Ex = PSE.getBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
  }
  // Add the size of the pointed element to ScEnd.
  unsigned EltSize =
      Ptr->getType()->getPointerElementType()->getScalarSizeInBits() / 8;
  const SCEV *EltSizeSCEV = SE->getConstant(ScEnd->getType(), EltSize);
  ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
}
SmallVector<RuntimePointerCheck, 4>
RuntimePointerChecking::generateChecks() const {
  SmallVector<RuntimePointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ))
        Checks.push_back(std::make_pair(&CGI, &CGJ));
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}
bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}
/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
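///
/// For example (illustrative): with I = %a and J = %a + 4, Diff = J - I is
/// the constant 4, which is non-negative, so I is the minimum; if the
/// difference is symbolic (e.g. %a vs. %b), nullptr is returned.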
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}
bool RuntimeCheckingPtrGroup::addPointer(unsigned Index) {
  const SCEV *Start = RtCheck.Pointers[Index].Start;
  const SCEV *End = RtCheck.Pointers[Index].End;

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  return true;
}
void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.
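  //
  // Illustration (hypothetical, added for exposition): if a pointer's
  // Start/End differ from the group's [Low, High] bounds by constants,
  // getMinFromExprs can order them and addPointer widens the bounds; if
  // the difference is symbolic (say it involves an unknown %n), the
  // comparison fails and the pointer starts a group of its own.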
  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
    return;
  }
  unsigned TotalComparisons = 0;

  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue] = Index;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      unsigned Pointer = PositionMap[MI->getPointer()];
      bool Merged = false;
      // Mark this pointer as seen.
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (RuntimeCheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain amount of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable. If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    llvm::copy(Groups, std::back_inserter(CheckingGroups));
  }
}
bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}
void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}
namespace {

/// Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;

  AccessAnalysis(const DataLayout &Dl, Loop *TheLoop, AAResults *AA,
                 LoopInfo *LI, MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : DL(Dl), TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA),
        IsRTCheckAnalysisNeeded(false), PSE(PSE) {}

  /// Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, LocationSize::unknown(), Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, LocationSize::unknown(), Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  ///
  /// Returns true if we can emit a run-time no alias check for \p Access.
  /// If we can check this access, this also adds it to a dependence set and
  /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
  /// we will attempt to use additional run-time checks in order to get
  /// the bounds of the pointer.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access,
                            const ValueToValueMap &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool ShouldCheckStride,
                            bool Assume);

  /// Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckWrap = false);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. FoundNonConstantDistanceDependence).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// Go over all memory accesses and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// The loop being checked.
  Loop *TheLoop;

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  ///
  /// Note that, this is different from isDependencyCheckNeeded. When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
  /// cleared while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};

} // end anonymous namespace
/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
/// by adding run-time checks (overflow checks) if necessary.
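///
/// For instance (illustrative): a loop-invariant pointer trivially has
/// bounds, and so does an affine AddRec such as {%base,+,4}; an access like
/// A[i*i] is non-affine and has no computable bounds. With \p Assume, PSE
/// may add predicates that turn some non-AddRec pointers into AddRecs.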
static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
                                const ValueToValueMap &Strides, Value *Ptr,
                                Loop *L, bool Assume) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);

  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);

  if (!AR && Assume)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR)
    return false;

  return AR->isAffine();
}
/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const ValueToValueMap &Strides, Value *Ptr, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, Ptr, L, Strides);
  if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  return false;
}
bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
                                          MemAccessInfo Access,
                                          const ValueToValueMap &StridesMap,
                                          DenseMap<Value *, unsigned> &DepSetId,
                                          Loop *TheLoop, unsigned &RunningDepId,
                                          unsigned ASId, bool ShouldCheckWrap,
                                          bool Assume) {
  Value *Ptr = Access.getPointer();

  if (!hasComputableBounds(PSE, StridesMap, Ptr, TheLoop, Assume))
    return false;

  // When we run after a failing dependency check we have to make sure
  // we don't have wrapping pointers.
  if (ShouldCheckWrap && !isNoWrap(PSE, StridesMap, Ptr, TheLoop)) {
    auto *Expr = PSE.getSCEV(Ptr);
    if (!Assume || !isa<SCEVAddRecExpr>(Expr))
      return false;
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
  }

  // The id of the dependence set.
  unsigned DepId;

  if (isDependencyCheckNeeded()) {
    Value *Leader = DepCands.getLeaderValue(Access).getPointer();
    unsigned &LeaderId = DepSetId[Leader];
    if (!LeaderId)
      LeaderId = RunningDepId++;
    DepId = LeaderId;
  } else
    // Each access has its own dependence set.
    DepId = RunningDepId++;

  bool IsWrite = Access.getInt();
  RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);
  LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');

  return true;
}
bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool MayNeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 0;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;
    ++ASId;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<MemAccessInfo, 4> Retries;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;

      if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId, TheLoop,
                                RunningDepId, ASId, ShouldCheckWrap, false)) {
        LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
        Retries.push_back(Access);
        CanDoAliasSetRT = false;
      }
    }

    // If we have at least two writes or one write and a read then we need to
    // check them. But there is no need for checks if there is only one
    // dependence set for this alias set.
    //
    // Note that this function computes CanDoRT and MayNeedRTCheck
    // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
    // we have a pointer for which we couldn't find the bounds but we don't
    // actually need to emit any checks so it does not matter.
    bool NeedsAliasSetRTCheck = false;
    if (!(IsDepCheckNeeded && CanDoAliasSetRT && RunningDepId == 2)) {
      NeedsAliasSetRTCheck = (NumWritePtrChecks >= 2 ||
                             (NumReadPtrChecks >= 1 && NumWritePtrChecks >= 1));
      // For alias sets without at least 2 writes or 1 write and 1 read, there
      // is no need to generate RT checks and CanDoAliasSetRT for this alias set
      // does not impact whether runtime checks can be generated.
      if (!NeedsAliasSetRTCheck) {
        assert((AS.size() <= 1 ||
                all_of(AS,
                       [this](auto AC) {
                         MemAccessInfo AccessWrite(AC.getValue(), true);
                         return DepCands.findValue(AccessWrite) ==
                                DepCands.end();
                       })) &&
               "Can only skip updating CanDoRT below, if all entries in AS "
               "are reads or there is at most 1 entry");
      }
    }

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoSetRt flag and retry all accesses that have failed.
      // We know that we need these checks, so we can now be more aggressive
      // and add further checks if required (overflow checks).
      CanDoAliasSetRT = true;
      for (auto Access : Retries)
        if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId,
                                  TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          break;
        }
    }

    CanDoRT &= CanDoAliasSetRT;
    MayNeedRTCheck |= NeedsAliasSetRTCheck;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (MayNeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  // If we can do run-time checks, but there are no checks, no runtime checks
  // are needed. This can happen when all pointers point to the same underlying
  // object for example.
  RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;

  bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}
void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG(dbgs() << "  AST: "; AST.dump());
  LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
  LLVM_DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " (" <<
                (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ?
                                         "read-only" : "read")) << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write - check other reads and writes for conflicts. If
          // this is a read, only check other writes for conflicts (but only if
          // there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<const Value *, 16> ValueVector;
          ValueVector TempObjects;

          GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (const Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases, don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}
static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.
  //
  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : make_range(GEP->idx_begin(), GEP->idx_end()))
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume a constant for the other operand so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}
/// Check whether the access through \p Ptr has a constant stride.
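///
/// For example (illustrative, not from the original comment): an access
/// A[2*i] to an array of i32 has an AddRec step of 8 bytes and an element
/// size of 4, so the returned stride is 2; a return value of 0 means no
/// usable constant stride was found.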
int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
                           const Loop *Lp, const ValueToValueMap &StridesMap,
                           bool Assume, bool ShouldCheckWrap) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  auto *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
                      << *Ptr << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                      << *Ptr << " SCEV: " << *AR << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = !ShouldCheckWrap ||
    PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
    isNoWrapAddRec(Ptr, AR, PSE, Lp);
  if (!IsNoWrapAddRec && !IsInBoundsGEP &&
      NullPointerIsDefined(Lp->getHeader()->getParent(),
                           PtrTy->getAddressSpace())) {
    if (Assume) {
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      IsNoWrapAddRec = true;
      LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
                        << "LAA:   Pointer: " << *Ptr << "\n"
                        << "LAA:   SCEV: " << *AR << "\n"
                        << "LAA:   Added an overflow assumption\n");
    } else {
      LLVM_DEBUG(
          dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                 << *Ptr << " SCEV: " << *AR << "\n");
      return 0;
    }
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                      << " SCEV: " << *AR << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && Stride != 1 && Stride != -1 &&
      (IsInBoundsGEP || !NullPointerIsDefined(Lp->getHeader()->getParent(),
                                              PtrTy->getAddressSpace()))) {
    if (Assume) {
      // We can avoid this case by adding a run-time check.
      LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
                        << "inbounds or in address space 0 may wrap:\n"
                        << "LAA:   Pointer: " << *Ptr << "\n"
                        << "LAA:   SCEV: " << *AR << "\n"
                        << "LAA:   Added an overflow assumption\n");
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    } else
      return 0;
  }

  return Stride;
}
bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
                           ScalarEvolution &SE,
                           SmallVectorImpl<unsigned> &SortedIndices) {
  assert(llvm::all_of(
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
         "Expected list of pointer operands.");
  SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
  OffValPairs.reserve(VL.size());

  // Walk over the pointers, and map each of them to an offset relative to
  // first pointer in the array.
  Value *Ptr0 = VL[0];
  const SCEV *Scev0 = SE.getSCEV(Ptr0);
  Value *Obj0 = GetUnderlyingObject(Ptr0, DL);

  llvm::SmallSet<int64_t, 4> Offsets;
  for (auto *Ptr : VL) {
    // TODO: Outline this code as a special, more time consuming, version of
    // computeConstantDifference() function.
    if (Ptr->getType()->getPointerAddressSpace() !=
        Ptr0->getType()->getPointerAddressSpace())
      return false;
    // If a pointer refers to a different underlying object, bail - the
    // pointers are by definition incomparable.
    Value *CurrObj = GetUnderlyingObject(Ptr, DL);
    if (CurrObj != Obj0)
      return false;

    const SCEV *Scev = SE.getSCEV(Ptr);
    const auto *Diff = dyn_cast<SCEVConstant>(SE.getMinusSCEV(Scev, Scev0));
    // The pointers may not have a constant offset from each other, or SCEV
    // may just not be smart enough to figure out they do. Regardless,
    // there's nothing we can do.
    if (!Diff)
      return false;

    // Check if the pointer with the same offset is found.
    int64_t Offset = Diff->getAPInt().getSExtValue();
    if (!Offsets.insert(Offset).second)
      return false;
    OffValPairs.emplace_back(Offset, Ptr);
  }
  SortedIndices.clear();
  SortedIndices.resize(VL.size());
  std::iota(SortedIndices.begin(), SortedIndices.end(), 0);

  // Sort the memory accesses in ascending order of their offsets.
  llvm::stable_sort(SortedIndices, [&](unsigned Left, unsigned Right) {
    return OffValPairs[Left].first < OffValPairs[Right].first;
  });

  // Check if the order is consecutive already.
  if (llvm::all_of(SortedIndices, [&SortedIndices](const unsigned I) {
        return I == SortedIndices[I];
      }))
    SortedIndices.clear();

  return true;
}
/// Take the address space operand from the Load/Store instruction.
/// Returns -1 if this is not a valid Load/Store instruction.
static unsigned getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}
/// Returns true if the memory operations \p A and \p B are consecutive.
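///
/// For instance (illustrative): two i32 loads from %p and %p + 4 are
/// consecutive, since the pointer delta equals the 4-byte store size of
/// i32.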
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return false;

  // Make sure that A and B have the same type if required.
  if (CheckType && PtrA->getType() != PtrB->getType())
    return false;

  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();

  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  // Retrieve the address space again as pointer stripping now tracks through
  // `addrspacecast`.
  ASA = cast<PointerType>(PtrA->getType())->getAddressSpace();
  ASB = cast<PointerType>(PtrB->getType())->getAddressSpace();
  // Check that the address spaces match and that the pointers are valid.
  if (ASA != ASB)
    return false;

  IdxWidth = DL.getIndexSizeInBits(ASA);
  OffsetA = OffsetA.sextOrTrunc(IdxWidth);
  OffsetB = OffsetB.sextOrTrunc(IdxWidth);

  APInt Size(IdxWidth, DL.getTypeStoreSize(Ty));

  // OffsetDelta = OffsetB - OffsetA;
  const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);
  const SCEV *OffsetSCEVB = SE.getConstant(OffsetB);
  const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA);
  const APInt &OffsetDelta = cast<SCEVConstant>(OffsetDeltaSCEV)->getAPInt();

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  // BaseDelta = Size - OffsetDelta;
  const SCEV *SizeSCEV = SE.getConstant(Size);
  const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV);

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta);
  return X == PtrSCEVB;
}
MemoryDepChecker::VectorizationSafetyStatus
MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return VectorizationSafetyStatus::Safe;

  case Unknown:
    return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return VectorizationSafetyStatus::Unsafe;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown;
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}
bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor store-load forwarding does not take place.
  // Positive dependences might cause troubles because vectorizing them might
  // prevent store-load forwarding making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
  // hence on your typical architecture store-load forwarding does not take
  // place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.
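  //
  // Worked example (hypothetical numbers, for illustration only): with
  // TypeByteSize = 4 and Distance = 24, VF = 8 divides 24 evenly, but
  // VF = 16 does not, and 24 / 16 == 1 is below the threshold of 32
  // iterations, so the maximum VF is capped at 8 bytes (two elements).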
  // After this many iterations store-to-load forwarding conflicts should not
  // cause any slowdowns.
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load is
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    LLVM_DEBUG(
        dbgs() << "LAA: Distance " << Distance
               << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}
void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
  if (Status < S)
    Status = S;
}
/// Given a non-constant (unknown) dependence-distance \p Dist between two
/// memory accesses, that have the same stride whose absolute value is given
/// in \p Stride, and that have the same type size \p TypeByteSize,
/// in a loop whose takenCount is \p BackedgeTakenCount, check if it is
/// possible to prove statically that the dependence distance is larger
/// than the range that the accesses will travel through the execution of
/// the loop. If so, return true; false otherwise. This is useful for
/// example in loops such as the following (PR31098):
///     for (i = 0; i < D; ++i) {
///                = out[i];
///       out[i+D] =
///     }
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &BackedgeTakenCount,
                                     const SCEV &Dist, uint64_t Stride,
                                     uint64_t TypeByteSize) {

  // If we can prove that
  //      (**) |Dist| > BackedgeTakenCount * Step
  // where Step is the absolute stride of the memory accesses in bytes,
  // then there is no dependence.
  //
  // Rationale:
  // We basically want to check if the absolute distance (|Dist/Step|)
  // is >= the loop iteration count (or > BackedgeTakenCount).
  // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
  // Section 4.2.1); Note, that for vectorization it is sufficient to prove
  // that the dependence distance is >= VF; This is checked elsewhere.
  // But in some cases we can prune unknown dependence distances early, and
  // even before selecting the VF, and without a runtime test, by comparing
  // the distance against the loop iteration count. Since the vectorized code
  // will be executed only if LoopCount >= VF, proving distance >= LoopCount
  // also guarantees that distance >= VF.
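  //
  // Illustration (hypothetical values, added for exposition): in the
  // PR31098 loop above with 4-byte elements and Stride == 1, Dist is
  // 4*D bytes while BackedgeTakenCount * Step is 4*(D-1) bytes, so the
  // first check below proves independence without a runtime test.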
  const uint64_t ByteStride = Stride * TypeByteSize;
  const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
  const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);

  const SCEV *CastedDist = &Dist;
  const SCEV *CastedProduct = Product;
  uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType());
  uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType());

  // The dependence distance can be positive/negative, so we sign extend Dist;
  // The multiplication of the absolute stride in bytes and the
  // backedgeTakenCount is non-negative, so we zero extend Product.
  if (DistTypeSize > ProductTypeSize)
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
  else
    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());

  // Is Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= Dist)
  const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= -1*Dist)
  const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
  Minus = SE.getMinusSCEV(NegDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  return false;
}
/// Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of the type byte size.
  if (Distance % TypeByteSize)
    return false;

  uint64_t ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |      |      |      | A[4] |      |      |      |
  //     |      |      | A[2] |      |      |      | A[6] |      |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
  //     |      |      |      |      | A[4] |      |      | A[7] |      |
  return ScaledDist % Stride;
}
1467 MemoryDepChecker::Dependence::DepType
1468 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
1469 const MemAccessInfo &B, unsigned BIdx,
1470 const ValueToValueMap &Strides) {
1471 assert (AIdx < BIdx && "Must pass arguments in program order");
1473 Value *APtr = A.getPointer();
1474 Value *BPtr = B.getPointer();
1475 bool AIsWrite = A.getInt();
1476 bool BIsWrite = B.getInt();
1478 // Two reads are independent.
1479 if (!AIsWrite && !BIsWrite)
1480 return Dependence::NoDep;
1482 // We cannot check pointers in different address spaces.
1483 if (APtr->getType()->getPointerAddressSpace() !=
1484 BPtr->getType()->getPointerAddressSpace())
1485 return Dependence::Unknown;
1487 int64_t StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true);
1488 int64_t StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true);
1490 const SCEV *Src = PSE.getSCEV(APtr);
1491 const SCEV *Sink = PSE.getSCEV(BPtr);
1493 // If the induction step is negative we have to invert source and sink of the
1495 if (StrideAPtr < 0) {
1496 std::swap(APtr, BPtr);
1497 std::swap(Src, Sink);
1498 std::swap(AIsWrite, BIsWrite);
1499 std::swap(AIdx, BIdx);
1500 std::swap(StrideAPtr, StrideBPtr);
1503 const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);
1505 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1506 << "(Induction step: " << StrideAPtr << ")\n");
1507 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
1508 << *InstMap[BIdx] << ": " << *Dist << "\n");
1510 // Need accesses with constant stride. We don't want to vectorize
1511 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
1512 // the address space.
1513 if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){
1514 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1515 return Dependence::Unknown;
1518 Type *ATy = APtr->getType()->getPointerElementType();
1519 Type *BTy = BPtr->getType()->getPointerElementType();
1520 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1521 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1522 uint64_t Stride = std::abs(StrideAPtr);
1523 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
1525 if (TypeByteSize == DL.getTypeAllocSize(BTy) &&
1526 isSafeDependenceDistance(DL, *(PSE.getSE()),
1527 *(PSE.getBackedgeTakenCount()), *Dist, Stride,
1529 return Dependence::NoDep;
1531 LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
1532 FoundNonConstantDistanceDependence = true;
1533 return Dependence::Unknown;
1536 const APInt &Val = C->getAPInt();
1537 int64_t Distance = Val.getSExtValue();
1539 // Attempt to prove strided accesses independent.
1540 if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy &&
1541 areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
1542 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
1543 return Dependence::NoDep;
1546 // Negative distances are not plausible dependencies.
1547 if (Val.isNegative()) {
1548 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
1549 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1550 (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
1552 LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
1553 return Dependence::ForwardButPreventsForwarding;
1556 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
1557 return Dependence::Forward;
1560 // Write to the same location with the same size.
1561 // Could be improved to assert type sizes are the same (i32 == float, etc).
1564 return Dependence::Forward;
1566 dbgs() << "LAA: Zero dependence difference but different types\n");
1567 return Dependence::Unknown;
1570 assert(Val.isStrictlyPositive() && "Expect a positive value");
1575 << "LAA: ReadWrite-Write positive dependency with different types\n");
1576 return Dependence::Unknown;
1579 // Bail out early if passed-in parameters make vectorization not feasible.
1580 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
1581 VectorizerParams::VectorizationFactor : 1);
1582 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
1583 VectorizerParams::VectorizationInterleave : 1);
1584 // The minimum number of iterations for a vectorized/unrolled version.
1585 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
1587 // It's not vectorizable if the distance is smaller than the minimum distance
1588 // needed for a vectroized/unrolled version. Vectorizing one iteration in
1589 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
1590 // TypeByteSize (No need to plus the last gap distance).
1592 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1594 // int *B = (int *)((char *)A + 14);
1595 // for (i = 0 ; i < 1024 ; i += 2)
1599 // Two accesses in memory (stride is 2):
1600 // | A[0] | | A[2] | | A[4] | | A[6] | |
1601 // | B[0] | | B[2] | | B[4] |
1603 // Distance needs for vectorizing iterations except the last iteration:
1604 // 4 * 2 * (MinNumIter - 1). Distance needs for the last iteration: 4.
1605 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
1607 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
1608 // 12, which is less than distance.
1610 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
1611 // the minimum distance needed is 28, which is greater than distance. It is
1612 // not safe to do vectorization.
1613 uint64_t MinDistanceNeeded =
1614 TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
1615 if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
1616 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
1617 << Distance << '\n');
1618 return Dependence::Backward;
1621 // Unsafe if the minimum distance needed is greater than max safe distance.
1622 if (MinDistanceNeeded > MaxSafeDepDistBytes) {
1623 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
1624 << MinDistanceNeeded << " size in bytes");
1625 return Dependence::Backward;
  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which
  // cannot handle different types.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the two accesses on array B, the max safe dependence distance
  // is 2. Then we analyze the accesses on array A, for which the minimum
  // distance needed is 8, which is greater than 2, so vectorization is
  // forbidden. But actually both A and B could safely be vectorized with a
  // factor of 2.
  MaxSafeDepDistBytes =
      std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;
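
  // Translate the maximum safe dependence distance into the widest safe
  // vectorization factor for this access; e.g. (hypothetical values) with
  // MaxSafeDepDistBytes = 16, TypeByteSize = 4 and Stride = 2, at most
  // 16 / (4 * 2) = 2 iterations can safely run as one vector.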
  uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
  LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
                    << " with max VF = " << MaxVF << '\n');
  uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
  MaxSafeRegisterWidth = std::min(MaxSafeRegisterWidth, MaxVFInBits);
  return Dependence::BackwardVectorizable;
}
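
/// Check whether the dependences between all pairs of memory accesses in
/// \p CheckDeps are safe for vectorization, walking each alias set in
/// \p AccessSets and classifying every access pair via isDependent().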
bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoList &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1;
  SmallPtrSet<MemAccessInfo, 8> Visited;
  for (MemAccessInfo CurAccess : CheckDeps) {
    if (Visited.count(CurAccess))
      continue;

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
      AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI =
        AccessSets.member_begin(I);
    EquivalenceClasses<MemAccessInfo>::member_iterator AE =
        AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      Visited.insert(*AI);
      bool AIIsWrite = AI->getInt();
      // Check loads only against next equivalent class, but stores also against
      // other stores in the same equivalence class - to the same address.
      EquivalenceClasses<MemAccessInfo>::member_iterator OI =
          (AIIsWrite ? AI : std::next(AI));
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          // Scan all accesses of another equivalence class, but only the next
          // accesses of the same equivalent class.
          for (std::vector<unsigned>::iterator
               I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
               I2E = (OI == AI ? I1E : Accesses[*OI].end());
               I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            // Make sure A is the access that comes first in program order.
            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            mergeInStatus(Dependence::isSafeForVectorization(Type));

            // Gather dependences unless we accumulated MaxDependences
            // dependences. In that case return as soon as we find the first
            // unsafe dependence. This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                LLVM_DEBUG(dbgs()
                           << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !isSafeForVectorization())
              return false;
          }
        ++OI;
      }
      AI++;
    }
  }

  LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return isSafeForVectorization();
}
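
/// Map the recorded access indices for \p Ptr back through InstMap to
/// recover the memory instructions, in program order, that access it with
/// the given access kind.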
SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  transform(IndexVector,
            std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}
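
// Note: this table is indexed by Dependence::DepType (see print() below), so
// its order must stay in sync with the DepType enumeration.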
const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}
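
/// Check that the loop has a shape this analysis can handle: an innermost,
/// bottom-tested loop with a single backedge, a single exiting block, and a
/// backedge-taken count that ScalarEvolution can compute.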
bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
                    << TheLoop->getHeader()->getParent()->getName() << ": "
                    << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE->getBackedgeTakenCount();
  if (ExitCount == PSE->getSE()->getCouldNotCompute()) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
                                 const TargetLibraryInfo *TLI,
                                 DominatorTree *DT) {
  typedef SmallPtrSet<Value*, 16> ValueSet;

  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  bool HasComplexMemInst = false;

  // A runtime check is only legal to insert if there are no convergent calls.
  HasConvergentOp = false;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  const bool EnableMemAccessVersioningOfLoop =
      EnableMemAccessVersioning &&
      !TheLoop->getHeader()->getParent()->hasOptSize();
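  // Versioning a loop for a known stride duplicates the loop body, so it is
  // skipped when the enclosing function is optimized for size.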

  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the BB and collect legal loads and stores. Also detect any
    // convergent instructions.
    for (Instruction &I : *BB) {
      if (auto *Call = dyn_cast<CallBase>(&I)) {
        if (Call->isConvergent())
          HasConvergentOp = true;
      }

      // With both a non-vectorizable memory instruction and a convergent
      // operation found in this loop, there is no reason to continue the
      // search.
      if (HasComplexMemInst && HasConvergentOp) {
        CanVecMem = false;
        return;
      }

      // Avoid hitting recordAnalysis multiple times.
      if (HasComplexMemInst)
        continue;

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (I.mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        auto *Call = dyn_cast<CallInst>(&I);
        if (Call && getVectorIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can safely
        // assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            !VFDatabase::getMappings(*Call).empty())
          continue;

        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld) {
          recordAnalysis("CantVectorizeInstruction", Ld)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!Ld->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleLoad", Ld)
              << "read with atomic ordering or volatile read";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          HasComplexMemInst = true;
          continue;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to memory.
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", St)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleStore", St)
              << "write with atomic ordering or volatile write";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          HasComplexMemInst = true;
          continue;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.

  if (HasComplexMemInst) {
    CanVecMem = false;
    return;
  }

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
                          TheLoop, AA, LI, DependentAccesses, *PSE);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  // Record uniform store addresses to identify if we have multiple stores
  // to the same address.
  ValueSet UniformStores;

  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();

    if (isUniform(Ptr))
      HasDependenceInvolvingLoopInvariantAddress |=
          !UniformStores.insert(Ptr).second;

    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    LLVM_DEBUG(
        dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
               << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x, because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the index is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second ||
        !getPtrStride(*PSE, Ptr, TheLoop, SymbolicStrides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    // See if there is an unsafe dependency between a load to a uniform
    // address and a store to the same uniform address.
    if (UniformStores.count(Ptr)) {
      LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
                           "load and uniform store to the same address!\n");
      HasDependenceInvolvingLoopInvariantAddress = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(),
                                                  TheLoop, SymbolicStrides);
  if (!CanDoRTIfNeeded) {
    recordAnalysis("CantIdentifyArrayBounds") << "cannot identify array bounds";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                      << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  LLVM_DEBUG(
      dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker->areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
    MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
      LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      auto *SE = PSE->getSE();
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop,
                                                 SymbolicStrides, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        recordAnalysis("CantCheckMemDepsAtRunTime")
            << "cannot check memory dependencies at runtime";
        LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (HasConvergentOp) {
    recordAnalysis("CantInsertRuntimeCheckWithConvergent")
        << "cannot add control dependency to convergent operation";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
                         "would be needed with a convergent operation\n");
    CanVecMem = false;
    return;
  }

  if (CanVecMem)
    LLVM_DEBUG(
        dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
               << (PtrRtChecking->Need ? "" : " don't")
               << " need runtime memory checks.\n");
  else {
    recordAnalysis("UnsafeMemDep")
        << "unsafe dependent memory operations in loop. Use "
           "#pragma loop distribute(enable) to allow loop distribution "
           "to attempt to isolate the offending operations into a separate "
           "loop";
    LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
                                                           Instruction *I) {
  assert(!Report && "Multiple reports generated");

  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
                                                        DL, CodeRegion);
  return *Report;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  auto *SE = PSE->getSE();
  // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
  // never considered uniform.
  // TODO: Is this really what we want? Even without FP SCEV, we may want some
  // trivially loop-invariant FP values to be considered uniform.
  if (!SE->isSCEVable(V->getType()))
    return false;
  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
}

void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = nullptr;
  if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
    Ptr = LI->getPointerOperand();
  else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
    Ptr = SI->getPointerOperand();
  else
    return;

  Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!Stride)
    return;

  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *Stride << "\n");

  // Avoid adding the "Stride == 1" predicate when we know that
  // Stride >= Trip-Count. Such a predicate will effectively optimize a single
  // or zero iteration loop, as Trip-Count <= Stride == 1.
  //
  // TODO: We are currently not making a very informed decision on when it is
  // beneficial to apply stride versioning. It might make more sense for the
  // users of this analysis (such as the vectorizer) to trigger it, based on
  // their specific cost considerations; for example, in cases where stride
  // versioning does not help resolving memory accesses/dependences, the
  // vectorizer should evaluate the cost of the runtime test, and the benefit
  // of various possible stride specializations, considering the alternatives
  // of using gather/scatters (if available).
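  //
  // For example (hypothetical source), given
  //   for (i = 0; i < n; i++)
  //     A[i * S] += 1;
  // versioning on the predicate "S == 1" lets the specialized copy of the
  // loop treat the accesses to A as consecutive.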

  const SCEV *StrideExpr = PSE->getSCEV(Stride);
  const SCEV *BETakenCount = PSE->getBackedgeTakenCount();

  // Match the types so we can compare the stride and the BETakenCount.
  // The Stride can be positive/negative, so we sign extend Stride;
  // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType());
  uint64_t BETypeSize = DL.getTypeAllocSize(BETakenCount->getType());
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = BETakenCount;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSize >= StrideTypeSize)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
  const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking:
  // "Stride >= TripCount" is equivalent to checking:
  // Stride - BETakenCount > 0
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.");

  SymbolicStrides[Ptr] = Stride;
  StrideSet.insert(Stride);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetLibraryInfo *TLI, AAResults *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(std::make_unique<RuntimePointerChecking>(SE)),
      DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L),
      NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false),
      HasConvergentOp(false),
      HasDependenceInvolvingLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop(AA, LI, TLI, DT);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    if (MaxSafeDepDistBytes != -1ULL)
      OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
         << " bytes";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (HasConvergentOp)
    OS.indent(Depth) << "Has convergent operation in loop\n";

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Non vectorizable stores to invariant address were "
                   << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getUnionPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}

LoopAccessLegacyAnalysis::LoopAccessLegacyAnalysis() : FunctionPass(ID) {
  initializeLoopAccessLegacyAnalysisPass(*PassRegistry::getPassRegistry());
}

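/// Return the cached analysis result for \p L, computing it lazily on first
/// use; the result is memoized in LoopAccessInfoMap.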
const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) {
  auto &LAI = LoopAccessInfoMap[L];

  if (!LAI)
    LAI = std::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI);

  return *LAI;
}

void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this);

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L);
      LAI.print(OS, 4);
    }
}
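
// runOnFunction only captures the analysis handles; the per-loop results are
// computed lazily by getInfo().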
bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  return false;
}

void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}

char LoopAccessLegacyAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"

INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)

AnalysisKey LoopAccessAnalysis::Key;

LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
                                       LoopStandardAnalysisResults &AR) {
  return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
}

namespace llvm {

  Pass *createLAAPass() {
    return new LoopAccessLegacyAnalysis();
  }

} // end namespace llvm