//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;
#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;
static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;
static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;
/// \brief The maximum iterations used to merge memory checks
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));
/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;
/// \brief We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));
/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///        A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));
/// \brief Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));
bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}
Value *llvm::stripIntegerCast(Value *V) {
  if (auto *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}
const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr, Value *OrigPtr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI != PtrToStride.end()) {
    Value *StrideVal = SI->second;

    // Strip casts.
    StrideVal = stripIntegerCast(StrideVal);

    ScalarEvolution *SE = PSE.getSE();
    const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
    const auto *CT =
        static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));

    PSE.addPredicate(*SE->getEqualPredicate(U, CT));
    auto *Expr = PSE.getSCEV(Ptr);

    DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *Expr
                 << "\n");
    return Expr;
  }

  // Otherwise, just return the SCEV of the original pointer.
  return OrigSCEV;
}
/// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on N-th loop
/// iteration. Then B is calculated as:
///   B = A + Step*N .
/// Step value may be positive or negative.
/// N is a calculated back-edge taken count:
///     N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
/// Start and End points are calculated in the following way:
/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
/// where SizeOfElt is the size of single memory access in bytes.
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
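///
/// As a worked instance (illustrative values, not from a test case): with
/// A = %p, Step = 4 bytes, N = 9 and SizeOfElt = 4, B is %p + 36, so
/// Start = %p and End = %p + 40, i.e. the interval [%p, %p + 40).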
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    const ValueToValueMap &Strides,
                                    PredicatedScalarEvolution &PSE) {
  // Get the stride replaced scev.
  const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
  ScalarEvolution *SE = PSE.getSE();

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(Sc, Lp))
    ScStart = ScEnd = Sc;
  else {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
    assert(AR && "Invalid addrec expression");
    const SCEV *Ex = PSE.getBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
  }

  // Add the size of the pointed element to ScEnd.
  unsigned EltSize =
      Ptr->getType()->getPointerElementType()->getScalarSizeInBits() / 8;
  const SCEV *EltSizeSCEV = SE->getConstant(ScEnd->getType(), EltSize);
  ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
}
SmallVector<RuntimePointerChecking::PointerCheck, 4>
RuntimePointerChecking::generateChecks() const {
  SmallVector<PointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimePointerChecking::CheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimePointerChecking::CheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ))
        Checks.push_back(std::make_pair(&CGI, &CGJ));
    }
  }
  return Checks;
}
void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}
bool RuntimePointerChecking::needsChecking(const CheckingPtrGroup &M,
                                           const CheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}
/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
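/// For example (illustrative): with I = %a and J = %a + 4, the difference
/// J - I is the constant 4, which is non-negative, so I is the minimum.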
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}
bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) {
  const SCEV *Start = RtCheck.Pointers[Index].Start;
  const SCEV *End = RtCheck.Pointers[Index].End;

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  return true;
}
void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.
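  //
  // As an illustration (hypothetical pointers, not from a test case): for
  // an equivalence class {%a, %a + 4, %b}, the accesses %a and %a + 4 merge
  // into one group because their distance to the group bounds is the
  // constant 4, while %b starts a second group when its offset from %a is
  // not known to be constant.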
  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // ShouldRetryWithRuntimeCheck is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(CheckingPtrGroup(I, *this));
    return;
  }
  unsigned TotalComparisons = 0;

  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue] = Index;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<CheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      unsigned Pointer = PositionMap[MI->getPointer()];
      bool Merged = false;
      // Mark this pointer as seen.
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (CheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain amount of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable. If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(CheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    std::copy(Groups.begin(), Groups.end(), std::back_inserter(CheckingGroups));
  }
}
bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}
bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}
void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}
void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}

namespace {
/// \brief Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// \brief Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;

  AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false),
        PSE(PSE) {}
  /// \brief Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }
  /// \brief Register a store.
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }
  /// \brief Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckWrap = false);
  /// \brief Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }
  /// \brief Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. ShouldRetryWithRuntimeCheck).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;
  /// \brief Go over all memory access and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value *, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// \brief Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded. When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared
  /// while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};

} // end anonymous namespace
/// \brief Check whether a pointer can participate in a runtime bounds check.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
                                const ValueToValueMap &Strides, Value *Ptr,
                                Loop *L) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);

  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR)
    return false;

  return AR->isAffine();
}
/// \brief Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const ValueToValueMap &Strides, Value *Ptr, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, Ptr, L, Strides);
  return Stride == 1 ||
         PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
}
bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool NeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;

      if (hasComputableBounds(PSE, StridesMap, Ptr, TheLoop) &&
          // When we run after a failing dependency check we have to make sure
          // we don't have wrapping pointers.
          (!ShouldCheckWrap || isNoWrap(PSE, StridesMap, Ptr, TheLoop))) {
        // The id of the dependence set.
        unsigned DepId;

        if (IsDepCheckNeeded) {
          Value *Leader = DepCands.getLeaderValue(Access).getPointer();
          unsigned &LeaderId = DepSetId[Leader];
          if (!LeaderId)
            LeaderId = RunningDepId++;
          DepId = LeaderId;
        } else
          // Each access has its own dependence set.
          DepId = RunningDepId++;

        RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);

        DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
      } else {
        DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
        CanDoRT = false;
      }
    }

    // If we have at least two writes or one write and a read then we need to
    // check them. But there is no need to check if there is only one
    // dependence set for this alias set.
    //
    // Note that this function computes CanDoRT and NeedRTCheck independently.
    // For example CanDoRT=false, NeedRTCheck=false means that we have a pointer
    // for which we couldn't find the bounds but we don't actually need to emit
    // any checks so it does not matter.
    if (!(IsDepCheckNeeded && CanDoRT && RunningDepId == 2))
      NeedRTCheck |= (NumWritePtrChecks >= 2 || (NumReadPtrChecks >= 1 &&
                                                 NumWritePtrChecks >= 1));

    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        DEBUG(dbgs() << "LAA: Runtime check would require comparison between"
                        " different address spaces\n");
        return false;
      }
    }
  }

  if (NeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
               << " pointer comparisons.\n");

  RtCheck.Need = NeedRTCheck;

  bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}
void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  DEBUG(dbgs() << "  AST: "; AST.dump());
  DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
  DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " (" <<
                (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ?
                                         "read-only" : "read")) << ")\n";
  });
  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<Value *, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of access to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write - check other reads and writes for conflicts. If
          // this is a read only check other writes for conflicts (but only if
          // there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<Value *, 16> ValueVector;
          ValueVector TempObjects;

          GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
          DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n");
          for (Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases, don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}
static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}
/// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
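///
/// An illustrative case (hypothetical IR, not from a test): for
/// "getelementptr inbounds i32, i32* %p, i64 %mul" with
/// "%mul = mul nsw i64 %i, 4" and %i an NSW AddRec of the loop, the pointer
/// can be proven non-wrapping here even when the pointer AddRec itself
/// carries no no-wrap flags.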
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : make_range(GEP->idx_begin(), GEP->idx_end()))
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }

  // The recurrence is on the pointer, ignore for now.
  if (!NonConstIndex)
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume constant for the other operand so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}
/// \brief Check whether the access through \p Ptr has a constant stride.
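///
/// Returns the stride as a multiple of the element size (e.g. 1 for a
/// unit-stride access), or 0 when no constant stride can be established,
/// as the early-return paths below indicate.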
int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
                           const Loop *Lp, const ValueToValueMap &StridesMap,
                           bool Assume, bool ShouldCheckWrap) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  auto *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type" << *Ptr
                 << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                 << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop " <<
          *Ptr << " SCEV: " << *AR << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = !ShouldCheckWrap ||
    PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
    isNoWrapAddRec(Ptr, AR, PSE, Lp);
  bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
  if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
    if (Assume) {
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      IsNoWrapAddRec = true;
      DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
                   << "LAA: Pointer: " << *Ptr << "\n"
                   << "LAA: SCEV: " << *AR << "\n"
                   << "LAA: Added an overflow assumption\n");
    } else {
      DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                   << *Ptr << " SCEV: " << *AR << "\n");
      return 0;
    }
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr <<
          " SCEV: " << *AR << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
      Stride != 1 && Stride != -1) {
    if (Assume) {
      // We can avoid this case by adding a run-time check.
      DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
                   << "inbounds or in address space 0 may wrap:\n"
                   << "LAA: Pointer: " << *Ptr << "\n"
                   << "LAA: SCEV: " << *AR << "\n"
                   << "LAA: Added an overflow assumption\n");
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    } else
      return 0;
  }

  return Stride;
}
/// Take the pointer operand from the Load/Store instruction.
/// Returns NULL if this is not a valid Load/Store instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}
/// Take the address space operand from the Load/Store instruction.
/// Returns -1 if this is not a valid Load/Store instruction.
static unsigned getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}
/// Returns true if the memory operations \p A and \p B are consecutive.
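///
/// For example (illustrative): an i32 load from %p followed by an i32 load
/// from %p plus 4 bytes is consecutive, since the byte distance between the
/// two pointers equals the store size of i32.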
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return false;

  // Make sure that A and B have the same type if required.
  if (CheckType && PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  //  OffsetDelta = OffsetB - OffsetA;
  const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);
  const SCEV *OffsetSCEVB = SE.getConstant(OffsetB);
  const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA);
  const SCEVConstant *OffsetDeltaC = dyn_cast<SCEVConstant>(OffsetDeltaSCEV);
  const APInt &OffsetDelta = OffsetDeltaC->getAPInt();
  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  //  BaseDelta = Size - OffsetDelta;
  const SCEV *SizeSCEV = SE.getConstant(Size);
  const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV);

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta);
  return X == PtrSCEVB;
}
bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return true;

  case Unknown:
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}
bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}
bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown;
}
bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}
bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor store-load forwarding does not take place.
  // Positive dependences might cause troubles because vectorizing them might
  // prevent store-load forwarding making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  //   The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
  //   hence on your typical architecture store-load forwarding does not take
  //   place. Vectorizing in such cases does not make sense.
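  //
  // As a worked example (illustrative numbers): with TypeByteSize = 4 and
  // Distance = 12, the first candidate VF below is 8; 12 % 8 != 0 and
  // 12 / 8 = 1 < 32 iterations, so the maximum safe VF drops to 4, which is
  // below 2 * TypeByteSize, and the function reports a conflict.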
  // Store-load forwarding distance.

  // After this many iterations store-to-load forwarding conflicts should not
  // cause any slowdowns.
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load are
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    DEBUG(dbgs() << "LAA: Distance " << Distance
                 << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}
/// Given a non-constant (unknown) dependence-distance \p Dist between two
/// memory accesses, that have the same stride whose absolute value is given
/// in \p Stride, and that have the same type size \p TypeByteSize,
/// in a loop whose takenCount is \p BackedgeTakenCount, check if it is
/// possible to prove statically that the dependence distance is larger
/// than the range that the accesses will travel through the execution of
/// the loop. If so, return true; false otherwise. This is useful for
/// example in loops such as the following (PR31098):
///     for (i = 0; i < D; ++i) {
///                = out[i];
///       out[i+D] =
///     }
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &BackedgeTakenCount,
                                     const SCEV &Dist, uint64_t Stride,
                                     uint64_t TypeByteSize) {
  // If we can prove that
  //      (**) |Dist| > BackedgeTakenCount * Step
  // where Step is the absolute stride of the memory accesses in bytes,
  // then there is no dependence.
  //
  // Rationale:
  // We basically want to check if the absolute distance (|Dist/Step|)
  // is >= the loop iteration count (or > BackedgeTakenCount).
  // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
  // Section 4.2.1); Note, that for vectorization it is sufficient to prove
  // that the dependence distance is >= VF; This is checked elsewhere.
  // But in some cases we can prune unknown dependence distances early, and
  // even before selecting the VF, and without a runtime test, by comparing
  // the distance against the loop iteration count. Since the vectorized code
  // will be executed only if LoopCount >= VF, proving distance >= LoopCount
  // also guarantees that distance >= VF.
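  //
  // For instance (illustrative numbers): with Stride = 1, TypeByteSize = 4
  // and BackedgeTakenCount = 99, Product below is 396, so a symbolic Dist
  // that is provably > 396 (or provably < -396) establishes independence.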
  const uint64_t ByteStride = Stride * TypeByteSize;
  const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
  const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);

  const SCEV *CastedDist = &Dist;
  const SCEV *CastedProduct = Product;
  uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType());
  uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType());

  // The dependence distance can be positive/negative, so we sign extend Dist;
  // The multiplication of the absolute stride in bytes and the
  // backedgeTakenCount is non-negative, so we zero extend Product.
  if (DistTypeSize > ProductTypeSize)
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
  else
    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());

  // Is  Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= Dist)
  const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  // Second try: Is  -Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= -1*Dist)
  const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
  Minus = SE.getMinusSCEV(NegDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  return false;
}
/// \brief Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not multiple of type byte size.
  if (Distance % TypeByteSize)
    return false;

  uint64_t ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |      |      |      | A[4] |      |      |      |
  //     |      |      | A[2] |      |      |      | A[6] |      |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
  //     |      |      |      |      | A[4] |      |      | A[7] |      |
  return ScaledDist % Stride;
}
MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx,
                              const ValueToValueMap &Strides) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  Value *APtr = A.getPointer();
  Value *BPtr = B.getPointer();
  bool AIsWrite = A.getInt();
  bool BIsWrite = B.getInt();

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return Dependence::NoDep;

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return Dependence::Unknown;
  int64_t StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true);
  int64_t StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true);

  const SCEV *Src = PSE.getSCEV(APtr);
  const SCEV *Sink = PSE.getSCEV(BPtr);

  // If the induction step is negative we have to invert source and sink of the
  // dependence.
  if (StrideAPtr < 0) {
    std::swap(APtr, BPtr);
    std::swap(Src, Sink);
    std::swap(AIsWrite, BIsWrite);
    std::swap(AIdx, BIdx);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);

  DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
               << "(Induction step: " << StrideAPtr << ")\n");
  DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
               << *InstMap[BIdx] << ": " << *Dist << "\n");
  // Need accesses with constant stride. We don't want to vectorize
  // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
  // the address space.
  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
    DEBUG(dbgs() << "Pointer access with non-constant stride\n");
    return Dependence::Unknown;
  }
  Type *ATy = APtr->getType()->getPointerElementType();
  Type *BTy = BPtr->getType()->getPointerElementType();
  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
  uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
  uint64_t Stride = std::abs(StrideAPtr);
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
  if (!C) {
    if (TypeByteSize == DL.getTypeAllocSize(BTy) &&
        isSafeDependenceDistance(DL, *(PSE.getSE()),
                                 *(PSE.getBackedgeTakenCount()), *Dist, Stride,
                                 TypeByteSize))
      return Dependence::NoDep;

    DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
    ShouldRetryWithRuntimeCheck = true;
    return Dependence::Unknown;
  }
  const APInt &Val = C->getAPInt();
  int64_t Distance = Val.getSExtValue();

  // Attempt to prove strided accesses independent.
  if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy &&
      areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
    DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
    return Dependence::NoDep;
  }
  // Negative distances are not plausible dependencies.
  if (Val.isNegative()) {
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    if (IsTrueDataDependence && EnableForwardingConflictDetection &&
        (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
         ATy != BTy)) {
      DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
      return Dependence::ForwardButPreventsForwarding;
    }

    DEBUG(dbgs() << "LAA: Dependence is negative\n");
    return Dependence::Forward;
  }
  // Write to the same location with the same size.
  // Could be improved to assert type sizes are the same (i32 == float, etc).
  if (Val == 0) {
    if (ATy == BTy)
      return Dependence::Forward;
    DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (ATy != BTy) {
    DEBUG(dbgs() <<
          "LAA: ReadWrite-Write positive dependency with different types\n");
    return Dependence::Unknown;
  }
  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
  // It's not vectorizable if the distance is smaller than the minimum distance
  // needed for a vectorized/unrolled version. Vectorizing one iteration in
  // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
  // TypeByteSize (No need to plus the last gap distance).
  //
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      foo(int *A) {
  //        int *B = (int *)((char *)A + 14);
  //        for (i = 0 ; i < 1024 ; i += 2)
  //          B[i] = A[i] + 1;
  //      }
  //
  // Two accesses in memory (stride is 2):
  //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
  //                              | B[0] |      | B[2] |      | B[4] |
  //
  // Distance needs for vectorizing iterations except the last iteration:
  // 4 * 2 * (MinNumIter - 1). Distance needs for the last iteration: 4.
  // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
  //
  // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
  // 12, which is less than distance.
  //
  // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
  // the minimum distance needed is 28, which is greater than distance. It is
  // not safe to do vectorization.
  uint64_t MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
    DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance
                 << '\n');
    return Dependence::Backward;
  }
  // Unsafe if the minimum distance needed is greater than max safe distance.
  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
    DEBUG(dbgs() << "LAA: Failure because it needs at least "
                 << MinDistanceNeeded << " size in bytes");
    return Dependence::Backward;
  }
  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which could
  // not handle different types.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the two accesses on array B, the max safe dependence distance
  // is 2. Then we analyze the accesses on array A, where the minimum distance
  // needed is 8, which is greater than 2, so vectorization is forbidden. But
  // actually both A and B could be vectorized by 2 iterations.
  MaxSafeDepDistBytes =
      std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
               << " with max VF = "
               << MaxSafeDepDistBytes / (TypeByteSize * Stride) << '\n');

  return Dependence::BackwardVectorizable;
}
bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoList &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1;
  SmallPtrSet<MemAccessInfo, 8> Visited;
  for (MemAccessInfo CurAccess : CheckDeps) {
    if (Visited.count(CurAccess))
      continue;

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
        AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI =
        AccessSets.member_begin(I);
    EquivalenceClasses<MemAccessInfo>::member_iterator AE =
        AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      Visited.insert(*AI);
      EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
               I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            // Swap A and B iff A is not before B in program order.
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            SafeForVectorization &= Dependence::isSafeForVectorization(Type);

            // Gather dependences unless we accumulated MaxDependences
            // dependences. In that case return as soon as we find the first
            // unsafe dependence. This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                DEBUG(dbgs() << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !SafeForVectorization)
              return false;
          }
        ++OI;
      }
      ++AI;
    }
  }

  DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return SafeForVectorization;
}
SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  transform(IndexVector,
            std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}
const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};
void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}
bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  DEBUG(dbgs() << "LAA: Found a loop in "
               << TheLoop->getHeader()->getParent()->getName() << ": "
               << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE->getBackedgeTakenCount();
  if (ExitCount == PSE->getSE()->getCouldNotCompute()) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}
void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
                                 const TargetLibraryInfo *TLI,
                                 DominatorTree *DT) {
  typedef SmallPtrSet<Value *, 16> ValueSet;

  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the BB and collect legal loads and stores.
    for (Instruction &I : *BB) {
      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (I.mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        auto *Call = dyn_cast<CallInst>(&I);
        if (Call && getVectorIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
          continue;

        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
          recordAnalysis("NonSimpleLoad", Ld)
              << "read with atomic ordering or volatile read";
          DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          CanVecMem = false;
          return;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioning)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", St)
              << "instruction cannot be vectorized";
          CanVecMem = false;
          return;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleStore", St)
              << "write with atomic ordering or volatile write";
          DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          CanVecMem = false;
          return;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioning)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.
  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }
  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
                          AA, LI, DependentAccesses, *PSE);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;
  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();
    // Check for store to loop invariant address.
    StoreToLoopInvariantAddress |= isUniform(Ptr);
    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }
  if (IsAnnotatedParallel) {
    DEBUG(dbgs()
          << "LAA: A loop annotated parallel, ignore memory dependency "
          << "checks.\n");
    CanVecMem = true;
    return;
  }
  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x, because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the address of i is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second ||
        !getPtrStride(*PSE, Ptr, TheLoop, SymbolicStrides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }
  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }
  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(),
                                                  TheLoop, SymbolicStrides);
  if (!CanDoRTIfNeeded) {
    recordAnalysis("CantIdentifyArrayBounds") << "cannot identify array bounds";
    DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                 << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker->areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
    MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();
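    // (Illustrative editorial note: a typical retry case is a loop accessing
    // two pointer arguments whose aliasing is unknown at compile time; the
    // static dependence test fails, but runtime checks that the accessed
    // ranges are disjoint can still prove the loop safe.)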

    if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
      DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      auto *SE = PSE->getSE();
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop,
                                                 SymbolicStrides, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        recordAnalysis("CantCheckMemDepsAtRunTime")
            << "cannot check memory dependencies at runtime";
        DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (CanVecMem)
    DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
                 << (PtrRtChecking->Need ? "" : " don't")
                 << " need runtime memory checks.\n");
  else {
    recordAnalysis("UnsafeMemDep")
        << "unsafe dependent memory operations in loop. Use "
           "#pragma loop distribute(enable) to allow loop distribution "
           "to attempt to isolate the offending operations into a separate "
           "loop";
    DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}
bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}
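
// Create and cache the analysis remark for this loop.  The remark points at
// the offending instruction when one is given, falling back to the loop's
// start location if the instruction has no debug location.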
OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
                                                           Instruction *I) {
  assert(!Report && "Multiple reports generated");

  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
                                                   CodeRegion);
  return *Report;
}
bool LoopAccessInfo::isUniform(Value *V) const {
  auto *SE = PSE->getSE();
  // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
  // never considered uniform.
  // TODO: Is this really what we want? Even without FP SCEV, we may want some
  // trivially loop-invariant FP values to be considered uniform.
  if (!SE->isSCEVable(V->getType()))
    return false;
  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
}
// FIXME: this function is currently a duplicate of the one in
// LoopVectorize.cpp.
static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
                                 Instruction *Loc) {
  if (FirstInst)
    return FirstInst;
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == Loc->getParent() ? I : nullptr;
  return nullptr;
}
namespace {

/// \brief IR Values for the lower and upper bounds of a pointer evolution. We
/// need to use value-handles because SCEV expansion can invalidate previously
/// expanded values. Thus expansion of a pointer can invalidate the bounds for
/// a previous pointer.
struct PointerBounds {
  TrackingVH<Value> Start;
  TrackingVH<Value> End;
};

} // end anonymous namespace
/// \brief Expand code for the lower and upper bound of the pointer group \p CG
/// in \p TheLoop. \return the values for the bounds.
static PointerBounds
expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop,
             Instruction *Loc, SCEVExpander &Exp, ScalarEvolution *SE,
             const RuntimePointerChecking &PtrRtChecking) {
  Value *Ptr = PtrRtChecking.Pointers[CG->Members[0]].PointerValue;
  const SCEV *Sc = SE->getSCEV(Ptr);

  unsigned AS = Ptr->getType()->getPointerAddressSpace();
  LLVMContext &Ctx = Loc->getContext();

  // Use this type for pointer arithmetic.
  Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);

  if (SE->isLoopInvariant(Sc, TheLoop)) {
    DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" << *Ptr
                 << "\n");
    // Ptr could be in the loop body. If so, expand a new one at the correct
    // location.
    Instruction *Inst = dyn_cast<Instruction>(Ptr);
    Value *NewPtr = (Inst && TheLoop->contains(Inst))
                        ? Exp.expandCodeFor(Sc, PtrArithTy, Loc)
                        : Ptr;
    // We must return a half-open range, which means incrementing Sc.
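    // (Editorial note: since the arithmetic is done on i8*, this produces the
    // minimal non-empty half-open interval [Ptr, Ptr + 1), one byte wide, for
    // a loop-invariant pointer.)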
    const SCEV *ScPlusOne = SE->getAddExpr(Sc, SE->getOne(PtrArithTy));
    Value *NewPtrPlusOne = Exp.expandCodeFor(ScPlusOne, PtrArithTy, Loc);
    return {NewPtr, NewPtrPlusOne};
  } else {
    Value *Start = nullptr, *End = nullptr;
    DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
    Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
    End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
    DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n");
    return {Start, End};
  }
}
/// \brief Turns a collection of checks into a collection of expanded upper and
/// lower bounds for both pointers in the check.
static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds(
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks,
    Loop *L, Instruction *Loc, ScalarEvolution *SE, SCEVExpander &Exp,
    const RuntimePointerChecking &PtrRtChecking) {
  SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;

  // Here we're relying on the SCEV Expander's cache to only emit code for the
  // same bounds once.
  transform(
      PointerChecks, std::back_inserter(ChecksWithBounds),
      [&](const RuntimePointerChecking::PointerCheck &Check) {
        PointerBounds
          First = expandBounds(Check.first, L, Loc, Exp, SE, PtrRtChecking),
          Second = expandBounds(Check.second, L, Loc, Exp, SE, PtrRtChecking);
        return std::make_pair(First, Second);
      });

  return ChecksWithBounds;
}
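
// Emit the actual runtime checks at \p Loc: one pairwise interval-overlap
// test per PointerCheck, OR-ed together into a single i1 flag that is true if
// any pair of pointer ranges conflicts.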
std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeChecks(
    Instruction *Loc,
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks)
    const {
  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  auto *SE = PSE->getSE();
  SCEVExpander Exp(*SE, DL, "induction");
  auto ExpandedChecks =
      expandBounds(PointerChecks, TheLoop, Loc, SE, Exp, *PtrRtChecking);

  LLVMContext &Ctx = Loc->getContext();
  Instruction *FirstInst = nullptr;
  IRBuilder<> ChkBuilder(Loc);
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;

  for (const auto &Check : ExpandedChecks) {
    const PointerBounds &A = Check.first, &B = Check.second;
    // Check if two pointers (A and B) conflict where conflict is computed as:
    // start(A) < end(B) && start(B) < end(A)
    unsigned AS0 = A.Start->getType()->getPointerAddressSpace();
    unsigned AS1 = B.Start->getType()->getPointerAddressSpace();

    assert((AS0 == B.End->getType()->getPointerAddressSpace()) &&
           (AS1 == A.End->getType()->getPointerAddressSpace()) &&
           "Trying to bounds check pointers with different address spaces");

    Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
    Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

    Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc");
    Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc");
    Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc");
    Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc");

    // [A|B].Start points to the first accessed byte under base [A|B].
    // [A|B].End points to the last accessed byte, plus one.
    // There is no conflict when the intervals are disjoint:
    // NoConflict = (B.Start >= A.End) || (A.Start >= B.End)
    //
    // bound0 = (B.Start < A.End)
    // bound1 = (A.Start < B.End)
    // IsConflict = bound0 & bound1
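    // (Worked example with illustrative addresses: A = [0x100, 0x120) and
    // B = [0x110, 0x130) conflict, since 0x110 < 0x120 and 0x100 < 0x130;
    // a disjoint B = [0x120, 0x140) passes, because bound0 = (0x120 < 0x120)
    // is false.)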
    Value *Cmp0 = ChkBuilder.CreateICmpULT(Start0, End1, "bound0");
    FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
    Value *Cmp1 = ChkBuilder.CreateICmpULT(Start1, End0, "bound1");
    FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
    Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
    FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    if (MemoryRuntimeCheck) {
      IsConflict =
          ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
      FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    }
    MemoryRuntimeCheck = IsConflict;
  }

  if (!MemoryRuntimeCheck)
    return std::make_pair(nullptr, nullptr);

  // We have to do this trickery because the IRBuilder might fold the check to
  // a constant expression in which case there is no Instruction anchored in
  // the check.
  Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
                                                 ConstantInt::getTrue(Ctx));
  ChkBuilder.Insert(Check, "memcheck.conflict");
  FirstInst = getFirstInst(FirstInst, Check, Loc);
  return std::make_pair(FirstInst, Check);
}
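
// Convenience overload: emit the checks recorded by the runtime pointer
// checking analysis, or nothing if no checks are needed.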
std::pair<Instruction *, Instruction *>
LoopAccessInfo::addRuntimeChecks(Instruction *Loc) const {
  if (!PtrRtChecking->Need)
    return std::make_pair(nullptr, nullptr);

  return addRuntimeChecks(Loc, PtrRtChecking->getChecks());
}
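
// Record an access whose pointer is governed by a loop-invariant symbolic
// stride, e.g. A[i * Stride].  (Editorial note: a client can then version the
// loop under a runtime guard such as Stride == 1, making the access
// consecutive and the symbolic stride replaceable by a constant.)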
void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = nullptr;
  if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
    Ptr = LI->getPointerOperand();
  else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
    Ptr = SI->getPointerOperand();
  else
    return;

  Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!Stride)
    return;

  DEBUG(dbgs() << "LAA: Found a strided access that we can version");
  DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
  SymbolicStrides[Ptr] = Stride;
  StrideSet.insert(Stride);
}
LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(llvm::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(llvm::make_unique<RuntimePointerChecking>(SE)),
      DepChecker(llvm::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L),
      NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false),
      StoreToLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop(AA, LI, TLI, DT);
}
void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    if (MaxSafeDepDistBytes != -1ULL)
      OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
         << " bytes";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Store to invariant address was "
                   << (StoreToLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getUnionPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}
const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) {
  auto &LAI = LoopAccessInfoMap[L];

  if (!LAI)
    LAI = llvm::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI);

  return *LAI.get();
}
void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this);

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L);
      LAI.print(OS, 4);
    }
}
bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  return false;
}
void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}
char LoopAccessLegacyAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"
INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
AnalysisKey LoopAccessAnalysis::Key;
LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
                                       LoopStandardAnalysisResults &AR) {
  return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
}
namespace llvm {

Pass *createLAAPass() {
  return new LoopAccessLegacyAnalysis();
}

} // end namespace llvm