//===- LoopDistribute.cpp - Loop Distribution Pass ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Loop Distribution Pass. Its main focus is to
// distribute loops that cannot be vectorized due to dependence cycles. It
// tries to isolate the offending dependences into a new loop allowing
// vectorization of the remaining parts.
//
// For dependence analysis, the pass uses the LoopVectorizer's
// LoopAccessAnalysis. Because this analysis presumes no change in the order of
// memory operations, special care is taken to preserve the lexical order of
// these operations.
//
// Similarly to the Vectorizer, the pass also supports loop versioning to
// run-time disambiguate potentially overlapping arrays.
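//
// For example, given a loop like the one below (an illustrative sketch; the
// array names are made up and assumed independent after the run-time checks),
// the recurrence through A forms a dependence cycle while the second
// statement is independent of it:
//
//   for (i = 0; i < n; i++) {
//     A[i + 1] = A[i] * B[i];   // dependence cycle, not vectorizable
//     C[i]     = D[i] * E[i];   // vectorizable
//   }
//
// Distribution isolates the cycle into its own loop so the rest can be
// vectorized:
//
//   for (i = 0; i < n; i++)
//     A[i + 1] = A[i] * B[i];
//   for (i = 0; i < n; i++)
//     C[i]     = D[i] * E[i];
//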
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopDistribute.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <functional>
#include <list>
#include <tuple>
#include <utility>

using namespace llvm;
#define LDIST_NAME "loop-distribute"
#define DEBUG_TYPE LDIST_NAME

static cl::opt<bool>
    LDistVerify("loop-distribute-verify", cl::Hidden,
                cl::desc("Turn on DominatorTree and LoopInfo verification "
                         "after Loop Distribution"),
                cl::init(false));

static cl::opt<bool> DistributeNonIfConvertible(
    "loop-distribute-non-if-convertible", cl::Hidden,
    cl::desc("Whether to distribute into a loop that may not be "
             "if-convertible by the loop vectorizer"),
    cl::init(false));

static cl::opt<unsigned> DistributeSCEVCheckThreshold(
    "loop-distribute-scev-check-threshold", cl::init(8), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed for Loop "
             "Distribution"));

static cl::opt<unsigned> PragmaDistributeSCEVCheckThreshold(
    "loop-distribute-scev-check-threshold-with-pragma", cl::init(128),
    cl::Hidden,
    cl::desc(
        "The maximum number of SCEV checks allowed for Loop "
        "Distribution for loop marked with #pragma loop distribute(enable)"));

static cl::opt<bool> EnableLoopDistribute(
    "enable-loop-distribute", cl::Hidden,
    cl::desc("Enable the new, experimental LoopDistribution Pass"),
    cl::init(false));

STATISTIC(NumLoopsDistributed, "Number of loops distributed");

namespace {
/// Maintains the set of instructions of the loop for a partition before
/// cloning. After cloning, it hosts the new loop.
class InstPartition {
  using InstructionSet = SmallPtrSet<Instruction *, 8>;

public:
  InstPartition(Instruction *I, Loop *L, bool DepCycle = false)
      : DepCycle(DepCycle), OrigLoop(L) {
    Set.insert(I);
  }

  /// Returns whether this partition contains a dependence cycle.
  bool hasDepCycle() const { return DepCycle; }

  /// Adds an instruction to this partition.
  void add(Instruction *I) { Set.insert(I); }

  /// Collection accessors.
  InstructionSet::iterator begin() { return Set.begin(); }
  InstructionSet::iterator end() { return Set.end(); }
  InstructionSet::const_iterator begin() const { return Set.begin(); }
  InstructionSet::const_iterator end() const { return Set.end(); }
  bool empty() const { return Set.empty(); }

  /// Moves this partition into \p Other. This partition becomes empty
  /// after this.
  void moveTo(InstPartition &Other) {
    Other.Set.insert(Set.begin(), Set.end());
    Set.clear();
    Other.DepCycle |= DepCycle;
  }
  /// Populates the partition with a transitive closure of all the
  /// instructions that the seeded instructions depend on.
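  ///
  /// For example, a partition seeded with a store also pulls in the stored
  /// value, its address computation, and any loop-carried PHIs those reach
  /// through use-def chains inside the loop (illustrative, not exhaustive).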
  void populateUsedSet() {
    // FIXME: We currently don't use control-dependence but simply include all
    // blocks (possibly empty at the end) and let simplifycfg mostly clean this
    // up.
    for (auto *B : OrigLoop->getBlocks())
      Set.insert(B->getTerminator());

    // Follow the use-def chains to form a transitive closure of all the
    // instructions that the originally seeded instructions depend on.
    SmallVector<Instruction *, 8> Worklist(Set.begin(), Set.end());
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      // Insert instructions from the loop that we depend on.
      for (Value *V : I->operand_values()) {
        auto *I = dyn_cast<Instruction>(V);
        if (I && OrigLoop->contains(I->getParent()) && Set.insert(I).second)
          Worklist.push_back(I);
      }
    }
  }
  /// Clones the original loop.
  ///
  /// Updates LoopInfo and DominatorTree using the information that block \p
  /// LoopDomBB dominates the loop.
  Loop *cloneLoopWithPreheader(BasicBlock *InsertBefore, BasicBlock *LoopDomBB,
                               unsigned Index, LoopInfo *LI,
                               DominatorTree *DT) {
    ClonedLoop = ::cloneLoopWithPreheader(InsertBefore, LoopDomBB, OrigLoop,
                                          VMap, Twine(".ldist") + Twine(Index),
                                          LI, DT, ClonedLoopBlocks);
    return ClonedLoop;
  }

  /// The cloned loop. If this partition is mapped to the original loop,
  /// this is null.
  const Loop *getClonedLoop() const { return ClonedLoop; }

  /// Returns the loop where this partition ends up after distribution.
  /// If this partition is mapped to the original loop then use the block from
  /// the loop.
  const Loop *getDistributedLoop() const {
    return ClonedLoop ? ClonedLoop : OrigLoop;
  }

  /// The VMap that is populated by cloning and then used in
  /// remapInstructions to remap the cloned instructions.
  ValueToValueMapTy &getVMap() { return VMap; }

  /// Remaps the cloned instructions using VMap.
  void remapInstructions() {
    remapInstructionsInBlocks(ClonedLoopBlocks, VMap);
  }

  /// Based on the set of instructions selected for this partition,
  /// removes the unnecessary ones.
  void removeUnusedInsts() {
    SmallVector<Instruction *, 8> Unused;

    for (auto *Block : OrigLoop->getBlocks())
      for (auto &Inst : *Block)
        if (!Set.count(&Inst)) {
          Instruction *NewInst = &Inst;
          if (!VMap.empty())
            NewInst = cast<Instruction>(VMap[NewInst]);

          assert(!isa<BranchInst>(NewInst) &&
                 "Branches are marked used early on");
          Unused.push_back(NewInst);
        }

    // Delete the instructions backwards, as it has a reduced likelihood of
    // having to update as many def-use and use-def chains.
    for (auto *Inst : reverse(Unused)) {
      if (!Inst->use_empty())
        Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
      Inst->eraseFromParent();
    }
  }

  void print() const {
    if (DepCycle)
      dbgs() << " (cycle)\n";
    for (auto *I : Set)
      // Prefix with the block name.
      dbgs() << "  " << I->getParent()->getName() << ":" << *I << "\n";
  }
  void printBlocks() const {
    for (auto *BB : getDistributedLoop()->getBlocks())
      dbgs() << *BB;
  }

private:
  /// Instructions from OrigLoop selected for this partition.
  InstructionSet Set;

  /// Whether this partition contains a dependence cycle.
  bool DepCycle;

  /// The original loop.
  Loop *OrigLoop;

  /// The cloned loop. If this partition is mapped to the original loop,
  /// this is null.
  Loop *ClonedLoop = nullptr;

  /// The blocks of ClonedLoop including the preheader. If this
  /// partition is mapped to the original loop, this is empty.
  SmallVector<BasicBlock *, 8> ClonedLoopBlocks;

  /// These get populated once the set of instructions has been
  /// finalized. If this partition is mapped to the original loop, these are
  /// not populated.
  ValueToValueMapTy VMap;
};

/// Holds the set of Partitions. It populates them, merges them and then
/// clones the loops.
class InstPartitionContainer {
  using InstToPartitionIdT = DenseMap<Instruction *, int>;

public:
  InstPartitionContainer(Loop *L, LoopInfo *LI, DominatorTree *DT)
      : L(L), LI(LI), DT(DT) {}

  /// Returns the number of partitions.
  unsigned getSize() const { return PartitionContainer.size(); }
  /// Adds \p Inst into the current partition if that is marked to
  /// contain cycles. Otherwise start a new partition for it.
  void addToCyclicPartition(Instruction *Inst) {
    // If the current partition is non-cyclic, start a new one.
    if (PartitionContainer.empty() || !PartitionContainer.back().hasDepCycle())
      PartitionContainer.emplace_back(Inst, L, /*DepCycle=*/true);
    else
      PartitionContainer.back().add(Inst);
  }

  /// Adds \p Inst into a partition that is not marked to contain
  /// dependence cycles.
  ///
  // Initially we isolate memory instructions into as many partitions as
  // possible, then later we may merge them back together.
  void addToNewNonCyclicPartition(Instruction *Inst) {
    PartitionContainer.emplace_back(Inst, L);
  }

  /// Merges adjacent non-cyclic partitions.
  ///
  /// The idea is that we currently only want to isolate the non-vectorizable
  /// partition. We could later allow more distribution among these partitions
  /// too.
  void mergeAdjacentNonCyclic() {
    mergeAdjacentPartitionsIf(
        [](const InstPartition *P) { return !P->hasDepCycle(); });
  }
  /// If a partition contains only conditional stores, we won't vectorize
  /// it. Try to merge it with a previous cyclic partition.
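  ///
  /// For instance (an illustrative sketch, not code handled specially by this
  /// pass), a partition whose only store is guarded, as in
  ///
  ///   if (Cond[i])
  ///     A[i] = 0;
  ///
  /// would need if-conversion of the store to vectorize on its own, so by
  /// default it is folded into the preceding cyclic partition instead.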
  void mergeNonIfConvertible() {
    mergeAdjacentPartitionsIf([&](const InstPartition *Partition) {
      if (Partition->hasDepCycle())
        return true;

      // Now, check if all stores are conditional in this partition.
      bool seenStore = false;

      for (auto *Inst : *Partition)
        if (isa<StoreInst>(Inst)) {
          seenStore = true;
          if (!LoopAccessInfo::blockNeedsPredication(Inst->getParent(), L, DT))
            return false;
        }
      return seenStore;
    });
  }

  /// Merges the partitions according to various heuristics.
  void mergeBeforePopulating() {
    mergeAdjacentNonCyclic();
    if (!DistributeNonIfConvertible)
      mergeNonIfConvertible();
  }
  /// Merges partitions in order to ensure that no loads are duplicated.
  ///
  /// We can't duplicate loads because that could potentially reorder them.
  /// LoopAccessAnalysis provides dependency information with the context that
  /// the order of memory operations is preserved.
  ///
  /// Returns true if any partitions were merged.
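  ///
  /// For example (illustrative numbering), if the same load was seeded into
  /// partition 1 and again into partition 3, partitions 2 and 3 are merged
  /// into partition 1 so that the load and every access between its uses keep
  /// their original relative order.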
  bool mergeToAvoidDuplicatedLoads() {
    using LoadToPartitionT = DenseMap<Instruction *, InstPartition *>;
    using ToBeMergedT = EquivalenceClasses<InstPartition *>;

    LoadToPartitionT LoadToPartition;
    ToBeMergedT ToBeMerged;

    // Step through the partitions and create equivalence between partitions
    // that contain the same load. Also put partitions in between them in the
    // same equivalence class to avoid reordering of memory operations.
    for (PartitionContainerT::iterator I = PartitionContainer.begin(),
                                       E = PartitionContainer.end();
         I != E; ++I) {
      auto *PartI = &*I;

      // If a load occurs in two partitions PartI and PartJ, merge all
      // partitions (PartI, PartJ] into PartI.
      for (Instruction *Inst : *PartI)
        if (isa<LoadInst>(Inst)) {
          bool NewElt;
          LoadToPartitionT::iterator LoadToPart;

          std::tie(LoadToPart, NewElt) =
              LoadToPartition.insert(std::make_pair(Inst, PartI));
          if (!NewElt) {
            LLVM_DEBUG(dbgs()
                       << "Merging partitions due to this load in multiple "
                       << "partitions: " << PartI << ", " << LoadToPart->second
                       << "\n" << *Inst << "\n");

            auto PartJ = I;
            do {
              --PartJ;
              ToBeMerged.unionSets(PartI, &*PartJ);
            } while (&*PartJ != LoadToPart->second);
          }
        }
    }
    if (ToBeMerged.empty())
      return false;

    // Merge the members of an equivalence class into its class leader. This
    // makes the members empty.
    for (ToBeMergedT::iterator I = ToBeMerged.begin(), E = ToBeMerged.end();
         I != E; ++I) {
      if (!I->isLeader())
        continue;

      auto PartI = I->getData();
      for (auto PartJ : make_range(std::next(ToBeMerged.member_begin(I)),
                                   ToBeMerged.member_end())) {
        PartJ->moveTo(*PartI);
      }
    }

    // Remove the empty partitions.
    PartitionContainer.remove_if(
        [](const InstPartition &P) { return P.empty(); });

    return true;
  }
  /// Sets up the mapping between instructions to partitions. If the
  /// instruction is duplicated across multiple partitions, set the entry to -1.
  void setupPartitionIdOnInstructions() {
    int PartitionID = 0;
    for (const auto &Partition : PartitionContainer) {
      for (Instruction *Inst : Partition) {
        bool NewElt;
        InstToPartitionIdT::iterator Iter;

        std::tie(Iter, NewElt) =
            InstToPartitionId.insert(std::make_pair(Inst, PartitionID));
        if (!NewElt)
          Iter->second = -1;
      }
      ++PartitionID;
    }
  }

  /// Populates the partitions with everything that the seeding
  /// instructions require.
  void populateUsedSet() {
    for (auto &P : PartitionContainer)
      P.populateUsedSet();
  }
  /// This performs the main chunk of the work of cloning the loops for
  /// the partitions.
  void cloneLoops() {
    BasicBlock *OrigPH = L->getLoopPreheader();
    // At this point the predecessor of the preheader is either the memcheck
    // block or the top part of the original preheader.
    BasicBlock *Pred = OrigPH->getSinglePredecessor();
    assert(Pred && "Preheader does not have a single predecessor");
    BasicBlock *ExitBlock = L->getExitBlock();
    assert(ExitBlock && "No single exit block");
    Loop *NewLoop;

    assert(!PartitionContainer.empty() && "at least two partitions expected");
    // We're cloning the preheader along with the loop so we already made sure
    // it was empty.
    assert(&*OrigPH->begin() == OrigPH->getTerminator() &&
           "preheader not empty");

    // Create a loop for each partition except the last. Clone the original
    // loop before PH along with adding a preheader for the cloned loop. Then
    // update PH to point to the newly added preheader.
    BasicBlock *TopPH = OrigPH;
    unsigned Index = getSize() - 1;
    for (auto I = std::next(PartitionContainer.rbegin()),
              E = PartitionContainer.rend();
         I != E; ++I, --Index, TopPH = NewLoop->getLoopPreheader()) {
      auto *Part = &*I;

      NewLoop = Part->cloneLoopWithPreheader(TopPH, Pred, Index, LI, DT);

      Part->getVMap()[ExitBlock] = TopPH;
      Part->remapInstructions();
    }
    Pred->getTerminator()->replaceUsesOfWith(OrigPH, TopPH);

    // Now go in forward order and update the immediate dominator for the
    // preheaders with the exiting block of the previous loop. Dominance
    // within the loop is updated in cloneLoopWithPreheader.
    for (auto Curr = PartitionContainer.cbegin(),
              Next = std::next(PartitionContainer.cbegin()),
              E = PartitionContainer.cend();
         Next != E; ++Curr, ++Next)
      DT->changeImmediateDominator(
          Next->getDistributedLoop()->getLoopPreheader(),
          Curr->getDistributedLoop()->getExitingBlock());
  }

  /// Removes the dead instructions from the cloned loops.
  void removeUnusedInsts() {
    for (auto &Partition : PartitionContainer)
      Partition.removeUnusedInsts();
  }
  /// For each memory pointer, it computes the partitionId the pointer is
  /// used in.
  ///
  /// This returns an array of int where the I-th entry corresponds to the I-th
  /// entry in LAI.getRuntimePointerCheck(). If the pointer is used in multiple
  /// partitions its entry is set to -1.
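  ///
  /// For example (illustrative numbering), a pointer accessed only from
  /// partition 0 gets entry 0, while one accessed from both partition 0 and
  /// partition 2 gets entry -1 and its run-time checks cannot be dropped.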
  SmallVector<int, 8>
  computePartitionSetForPointers(const LoopAccessInfo &LAI) {
    const RuntimePointerChecking *RtPtrCheck = LAI.getRuntimePointerChecking();

    unsigned N = RtPtrCheck->Pointers.size();
    SmallVector<int, 8> PtrToPartitions(N);
    for (unsigned I = 0; I < N; ++I) {
      Value *Ptr = RtPtrCheck->Pointers[I].PointerValue;
      auto Instructions =
          LAI.getInstructionsForAccess(Ptr, RtPtrCheck->Pointers[I].IsWritePtr);

      int &Partition = PtrToPartitions[I];
      // First set it to uninitialized.
      Partition = -2;
      for (Instruction *Inst : Instructions) {
        // Note that this could be -1 if Inst is duplicated across multiple
        // partitions.
        int ThisPartition = this->InstToPartitionId[Inst];
        if (Partition == -2)
          Partition = ThisPartition;
        // -1 means belonging to multiple partitions.
        else if (Partition == -1)
          break;
        else if (Partition != (int)ThisPartition)
          Partition = -1;
      }
      assert(Partition != -2 && "Pointer not belonging to any partition");
    }

    return PtrToPartitions;
  }
  void print(raw_ostream &OS) const {
    unsigned Index = 0;
    for (const auto &P : PartitionContainer) {
      OS << "Partition " << Index++ << " (" << &P << "):\n";
      P.print();
    }
  }

  void dump() const { print(dbgs()); }

  friend raw_ostream &operator<<(raw_ostream &OS,
                                 const InstPartitionContainer &Partitions) {
    Partitions.print(OS);
    return OS;
  }

  void printBlocks() const {
    unsigned Index = 0;
    for (const auto &P : PartitionContainer) {
      dbgs() << "\nPartition " << Index++ << " (" << &P << "):\n";
      P.printBlocks();
    }
  }

private:
  using PartitionContainerT = std::list<InstPartition>;

  /// List of partitions.
  PartitionContainerT PartitionContainer;

  /// Mapping from Instruction to partition Id. If the instruction
  /// belongs to multiple partitions the entry contains -1.
  InstToPartitionIdT InstToPartitionId;

  Loop *L;
  LoopInfo *LI;
  DominatorTree *DT;

  /// The control structure to merge adjacent partitions if both satisfy
  /// the \p Predicate.
  template <class UnaryPredicate>
  void mergeAdjacentPartitionsIf(UnaryPredicate Predicate) {
    InstPartition *PrevMatch = nullptr;
    for (auto I = PartitionContainer.begin(); I != PartitionContainer.end();) {
      auto DoesMatch = Predicate(&*I);
      if (PrevMatch == nullptr && DoesMatch) {
        PrevMatch = &*I;
        ++I;
      } else if (PrevMatch != nullptr && DoesMatch) {
        I->moveTo(*PrevMatch);
        I = PartitionContainer.erase(I);
      } else {
        PrevMatch = nullptr;
        ++I;
      }
    }
  }
};
/// For each memory instruction, this class maintains the difference between
/// the number of unsafe dependences that start out from this instruction and
/// the number that end here.
///
/// By traversing the memory instructions in program order and accumulating
/// this number, we know whether any unsafe dependence crosses over a program
/// point.
class MemoryInstructionDependences {
  using Dependence = MemoryDepChecker::Dependence;

public:
  struct Entry {
    Instruction *Inst;
    unsigned NumUnsafeDependencesStartOrEnd = 0;

    Entry(Instruction *Inst) : Inst(Inst) {}
  };

  using AccessesType = SmallVector<Entry, 8>;

  AccessesType::const_iterator begin() const { return Accesses.begin(); }
  AccessesType::const_iterator end() const { return Accesses.end(); }

  MemoryInstructionDependences(
      const SmallVectorImpl<Instruction *> &Instructions,
      const SmallVectorImpl<Dependence> &Dependences) {
    Accesses.append(Instructions.begin(), Instructions.end());

    LLVM_DEBUG(dbgs() << "Backward dependences:\n");
    for (auto &Dep : Dependences)
      if (Dep.isPossiblyBackward()) {
        // Note that the designations source and destination follow the program
        // order, i.e. source is always first. (The direction is given by the
        // DepType.)
        ++Accesses[Dep.Source].NumUnsafeDependencesStartOrEnd;
        --Accesses[Dep.Destination].NumUnsafeDependencesStartOrEnd;

        LLVM_DEBUG(Dep.print(dbgs(), 2, Instructions));
      }
  }

private:
  AccessesType Accesses;
};
/// The actual class performing the per-loop work.
class LoopDistributeForLoop {
public:
  LoopDistributeForLoop(Loop *L, Function *F, LoopInfo *LI, DominatorTree *DT,
                        ScalarEvolution *SE, OptimizationRemarkEmitter *ORE)
      : L(L), F(F), LI(LI), DT(DT), SE(SE), ORE(ORE) {
    setForced();
  }

  /// Try to distribute an inner-most loop.
  bool processLoop(std::function<const LoopAccessInfo &(Loop &)> &GetLAA) {
    assert(L->empty() && "Only process inner loops.");

    LLVM_DEBUG(dbgs() << "\nLDist: In \""
                      << L->getHeader()->getParent()->getName()
                      << "\" checking " << *L << "\n");

    if (!L->getExitBlock())
      return fail("MultipleExitBlocks", "multiple exit blocks");
    if (!L->isLoopSimplifyForm())
      return fail("NotLoopSimplifyForm",
                  "loop is not in loop-simplify form");

    BasicBlock *PH = L->getLoopPreheader();

    // LAA will check that we only have a single exiting block.
    LAI = &GetLAA(*L);

    // Currently, we only distribute to isolate the part of the loop with
    // dependence cycles to enable partial vectorization.
    if (LAI->canVectorizeMemory())
      return fail("MemOpsCanBeVectorized",
                  "memory operations are safe for vectorization");

    auto *Dependences = LAI->getDepChecker().getDependences();
    if (!Dependences || Dependences->empty())
      return fail("NoUnsafeDeps", "no unsafe dependences to isolate");
    InstPartitionContainer Partitions(L, LI, DT);

    // First, go through each memory operation and assign them to consecutive
    // partitions (the order of partitions follows program order). Put those
    // with unsafe dependences into a "cyclic" partition, otherwise put each
    // store in its own "non-cyclic" partition (we'll merge these later).
    //
    // Note that a memory operation (e.g. Load2 below) at a program point that
    // has an unsafe dependence (Store3->Load1) spanning over it must be
    // included in the same cyclic partition as the dependent operations. This
    // is to preserve the original program order after distribution. E.g.:
    //
    //            NumUnsafeDependencesStartOrEnd  NumUnsafeDependencesActive
    //  Load1  -.               1                          0->1
    //  Load2   | /Unsafe/      0                          1
    //  Store3 -'              -1                          1->0
    //  Load4                   0                          0
    //
    // NumUnsafeDependencesActive > 0 indicates this situation and in this case
    // we just keep assigning to the same cyclic partition until
    // NumUnsafeDependencesActive reaches 0.
    const MemoryDepChecker &DepChecker = LAI->getDepChecker();
    MemoryInstructionDependences MID(DepChecker.getMemoryInstructions(),
                                     *Dependences);

    int NumUnsafeDependencesActive = 0;
    for (auto &InstDep : MID) {
      Instruction *I = InstDep.Inst;
      // We update NumUnsafeDependencesActive post-instruction, catch the
      // start of a dependence directly via NumUnsafeDependencesStartOrEnd.
      if (NumUnsafeDependencesActive ||
          InstDep.NumUnsafeDependencesStartOrEnd > 0)
        Partitions.addToCyclicPartition(I);
      else
        Partitions.addToNewNonCyclicPartition(I);
      NumUnsafeDependencesActive += InstDep.NumUnsafeDependencesStartOrEnd;
      assert(NumUnsafeDependencesActive >= 0 &&
             "Negative number of dependences active");
    }
    // Add partitions for values used outside. These partitions can be out of
    // order from the original program order. This is OK because if the
    // partition uses a load we will merge this partition with the original
    // partition of the load that we set up in the previous loop (see
    // mergeToAvoidDuplicatedLoads).
    auto DefsUsedOutside = findDefsUsedOutsideOfLoop(L);
    for (auto *Inst : DefsUsedOutside)
      Partitions.addToNewNonCyclicPartition(Inst);

    LLVM_DEBUG(dbgs() << "Seeded partitions:\n" << Partitions);
    if (Partitions.getSize() < 2)
      return fail("CantIsolateUnsafeDeps",
                  "cannot isolate unsafe dependencies");
    // Run the merge heuristics: Merge non-cyclic adjacent partitions since we
    // should be able to vectorize these together.
    Partitions.mergeBeforePopulating();
    LLVM_DEBUG(dbgs() << "\nMerged partitions:\n" << Partitions);
    if (Partitions.getSize() < 2)
      return fail("CantIsolateUnsafeDeps",
                  "cannot isolate unsafe dependencies");

    // Now, populate the partitions with non-memory operations.
    Partitions.populateUsedSet();
    LLVM_DEBUG(dbgs() << "\nPopulated partitions:\n" << Partitions);

    // In order to preserve original lexical order for loads, keep them in the
    // partition that we set up in the MemoryInstructionDependences loop.
    if (Partitions.mergeToAvoidDuplicatedLoads()) {
      LLVM_DEBUG(dbgs() << "\nPartitions merged to ensure unique loads:\n"
                        << Partitions);
      if (Partitions.getSize() < 2)
        return fail("CantIsolateUnsafeDeps",
                    "cannot isolate unsafe dependencies");
    }
    // Don't distribute the loop if we need too many SCEV run-time checks.
    const SCEVUnionPredicate &Pred = LAI->getPSE().getUnionPredicate();
    if (Pred.getComplexity() > (IsForced.getValueOr(false)
                                    ? PragmaDistributeSCEVCheckThreshold
                                    : DistributeSCEVCheckThreshold))
      return fail("TooManySCEVRuntimeChecks",
                  "too many SCEV run-time checks needed.\n");

    LLVM_DEBUG(dbgs() << "\nDistributing loop: " << *L << "\n");
    // We're done forming the partitions; set up the reverse mapping from
    // instructions to partitions.
    Partitions.setupPartitionIdOnInstructions();

    // To keep things simple have an empty preheader before we version or clone
    // the loop. (Also split if this has no predecessor, i.e. entry, because we
    // rely on PH having a predecessor.)
    if (!PH->getSinglePredecessor() || &*PH->begin() != PH->getTerminator())
      SplitBlock(PH, PH->getTerminator(), DT, LI);
    // If we need run-time checks, version the loop now.
    auto PtrToPartition = Partitions.computePartitionSetForPointers(*LAI);
    const auto *RtPtrChecking = LAI->getRuntimePointerChecking();
    const auto &AllChecks = RtPtrChecking->getChecks();
    auto Checks = includeOnlyCrossPartitionChecks(AllChecks, PtrToPartition,
                                                  RtPtrChecking);

    if (!Pred.isAlwaysTrue() || !Checks.empty()) {
      LLVM_DEBUG(dbgs() << "\nPointers:\n");
      LLVM_DEBUG(LAI->getRuntimePointerChecking()->printChecks(dbgs(), Checks));
      LoopVersioning LVer(*LAI, L, LI, DT, SE, false);
      LVer.setAliasChecks(std::move(Checks));
      LVer.setSCEVChecks(LAI->getPSE().getUnionPredicate());
      LVer.versionLoop(DefsUsedOutside);
      LVer.annotateLoopWithNoAlias();
    }

    // Create identical copies of the original loop for each partition and hook
    // them up sequentially.
    Partitions.cloneLoops();

    // Now, we remove the instructions from each loop that don't belong to that
    // partition.
    Partitions.removeUnusedInsts();
    LLVM_DEBUG(dbgs() << "\nAfter removing unused Instrs:\n");
    LLVM_DEBUG(Partitions.printBlocks());

    if (LDistVerify) {
      LI->verify(*DT);
      assert(DT->verify(DominatorTree::VerificationLevel::Fast));
    }

    ++NumLoopsDistributed;
    // Report the success.
    ORE->emit([&]() {
      return OptimizationRemark(LDIST_NAME, "Distribute", L->getStartLoc(),
                                L->getHeader())
             << "distributed loop";
    });
    return true;
  }
  /// Provide diagnostics then \return with false.
  bool fail(StringRef RemarkName, StringRef Message) {
    LLVMContext &Ctx = F->getContext();
    bool Forced = isForced().getValueOr(false);

    LLVM_DEBUG(dbgs() << "Skipping; " << Message << "\n");

    // With Rpass-missed report that distribution failed.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LDIST_NAME, "NotDistributed",
                                      L->getStartLoc(), L->getHeader())
             << "loop not distributed: use -Rpass-analysis=loop-distribute for "
                "more info";
    });

    // With Rpass-analysis report why. This is on by default if distribution
    // was requested explicitly.
    ORE->emit(OptimizationRemarkAnalysis(
                  Forced ? OptimizationRemarkAnalysis::AlwaysPrint : LDIST_NAME,
                  RemarkName, L->getStartLoc(), L->getHeader())
              << "loop not distributed: " << Message);

    // Also issue a warning if distribution was requested explicitly but it
    // failed.
    if (Forced)
      Ctx.diagnose(DiagnosticInfoOptimizationFailure(
          *F, L->getStartLoc(), "loop not distributed: failed "
                                "explicitly specified loop distribution"));

    return false;
  }
  /// Return whether distribution was forced to be enabled/disabled for the
  /// loop.
  ///
  /// If the optional has a value, it indicates whether distribution was forced
  /// to be enabled (true) or disabled (false). If the optional has no value
  /// distribution was not forced either way.
  const Optional<bool> &isForced() const { return IsForced; }

private:
  /// Filter out checks between pointers from the same partition.
  ///
  /// \p PtrToPartition contains the partition number for pointers. Partition
  /// number -1 means that the pointer is used in multiple partitions. In this
  /// case we can't safely omit the check.
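  ///
  /// For example (illustrative numbering), a check between a pointer in
  /// partition 0 and one in partition 1 is kept, while a check whose pointers
  /// all land in the same partition is dropped, because distribution does not
  /// reorder accesses within a single partition.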
  SmallVector<RuntimePointerChecking::PointerCheck, 4>
  includeOnlyCrossPartitionChecks(
      const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &AllChecks,
      const SmallVectorImpl<int> &PtrToPartition,
      const RuntimePointerChecking *RtPtrChecking) {
    SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks;

    copy_if(AllChecks, std::back_inserter(Checks),
            [&](const RuntimePointerChecking::PointerCheck &Check) {
              for (unsigned PtrIdx1 : Check.first->Members)
                for (unsigned PtrIdx2 : Check.second->Members)
                  // Only include this check if there is a pair of pointers
                  // that require checking and the pointers fall into
                  // separate partitions.
                  //
                  // (Note that we already know at this point that the two
                  // pointer groups need checking but it doesn't follow
                  // that each pair of pointers within the two groups need
                  // checking as well.
                  //
                  // In other words we don't want to include a check just
                  // because there is a pair of pointers between the two
                  // pointer groups that require checks and a different
                  // pair whose pointers fall into different partitions.)
                  if (RtPtrChecking->needsChecking(PtrIdx1, PtrIdx2) &&
                      !RuntimePointerChecking::arePointersInSamePartition(
                          PtrToPartition, PtrIdx1, PtrIdx2))
                    return true;
              return false;
            });

    return Checks;
  }
  /// Check whether the loop metadata is forcing distribution to be
  /// enabled/disabled.
  void setForced() {
    Optional<const MDOperand *> Value =
        findStringMetadataForLoop(L, "llvm.loop.distribute.enable");
    if (!Value)
      return;

    const MDOperand *Op = *Value;
    assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
    IsForced = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
  }

  Loop *L;
  Function *F;

  // Analyses used.
  LoopInfo *LI;
  const LoopAccessInfo *LAI = nullptr;
  DominatorTree *DT;
  ScalarEvolution *SE;
  OptimizationRemarkEmitter *ORE;

  /// Indicates whether distribution is forced to be enabled/disabled for
  /// the loop.
  ///
  /// If the optional has a value, it indicates whether distribution was forced
  /// to be enabled (true) or disabled (false). If the optional has no value
  /// distribution was not forced either way.
  Optional<bool> IsForced;
};

} // end anonymous namespace
/// Shared implementation between new and old PMs.
static bool runImpl(Function &F, LoopInfo *LI, DominatorTree *DT,
                    ScalarEvolution *SE, OptimizationRemarkEmitter *ORE,
                    std::function<const LoopAccessInfo &(Loop &)> &GetLAA) {
  // Build up a worklist of inner-loops to distribute. This is necessary as the
  // act of distributing a loop creates new loops and can invalidate iterators
  // across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop))
      // We only handle inner-most loops.
      if (L->empty())
        Worklist.push_back(L);

  // Now walk the identified inner loops.
  bool Changed = false;
  for (Loop *L : Worklist) {
    LoopDistributeForLoop LDL(L, &F, LI, DT, SE, ORE);

    // If distribution was forced for the specific loop to be
    // enabled/disabled, follow that. Otherwise use the global flag.
    if (LDL.isForced().getValueOr(EnableLoopDistribute))
      Changed |= LDL.processLoop(GetLAA);
  }

  // Process each loop nest in the function.
  return Changed;
}
namespace {

class LoopDistributeLegacy : public FunctionPass {
public:
  static char ID;

  LoopDistributeLegacy() : FunctionPass(ID) {
    // The default is set by the caller.
    initializeLoopDistributeLegacyPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return runImpl(F, LI, DT, SE, ORE, GetLAA);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
  }
};

} // end anonymous namespace
PreservedAnalyses LoopDistributePass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  // We don't directly need these analyses but they're required for loop
  // analyses so provide them below.
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };

  bool Changed = runImpl(F, &LI, &DT, &SE, &ORE, GetLAA);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<GlobalsAA>();
  return PA;
}
char LoopDistributeLegacy::ID;

static const char ldist_name[] = "Loop Distribution";

INITIALIZE_PASS_BEGIN(LoopDistributeLegacy, LDIST_NAME, ldist_name, false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(LoopDistributeLegacy, LDIST_NAME, ldist_name, false, false)

FunctionPass *llvm::createLoopDistributePass() {
  return new LoopDistributeLegacy();
}