//===- LoopLoadElimination.cpp - Loop Load Elimination Pass ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a loop-aware load elimination pass.
//
// It uses LoopAccessAnalysis to identify loop-carried dependences with a
// distance of one between stores and loads. These form the candidates for the
// transformation. The source value of each store is then propagated to the
// users of the corresponding load. This makes the load dead.
//
// The pass can also version the loop and add memchecks in order to prove that
// may-aliasing stores can't change the value in memory before it's read by the
// load.
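//
// For example (an illustrative loop, not from the original header):
//
//   for (unsigned i = 0; i < N; ++i) {
//     A[i + 1] = B[i] + 1;  // store
//     C[i]     = A[i] * 2;  // load; reads the value stored one iteration ago
//   }
//
// The load of A[i] can be replaced by a PHI carrying B[i-1] + 1, the value
// stored on the previous iteration, which makes the load dead.
//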
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/LoopLoadElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include <algorithm>
#include <cassert>
#include <forward_list>
#include <functional>
#include <iterator>
#include <set>
#include <tuple>
#include <utility>

using namespace llvm;

#define LLE_OPTION "loop-load-elim"
#define DEBUG_TYPE LLE_OPTION
static cl::opt<unsigned> CheckPerElim(
    "runtime-check-per-loop-load-elim", cl::Hidden,
    cl::desc("Max number of memchecks allowed per eliminated load on average"),
    cl::init(1));

static cl::opt<unsigned> LoadElimSCEVCheckThreshold(
    "loop-load-elimination-scev-check-threshold", cl::init(8), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed for Loop "
             "Load Elimination"));

STATISTIC(NumLoopLoadEliminated, "Number of loads eliminated by LLE");

namespace {
/// \brief Represent a store-to-forwarding candidate.
struct StoreToLoadForwardingCandidate {
  LoadInst *Load;
  StoreInst *Store;

  StoreToLoadForwardingCandidate(LoadInst *Load, StoreInst *Store)
      : Load(Load), Store(Store) {}
  /// \brief Return true if the dependence from the store to the load has a
  /// distance of one. E.g. A[i+1] = A[i]
  bool isDependenceDistanceOfOne(PredicatedScalarEvolution &PSE,
                                 Loop *L) const {
    Value *LoadPtr = Load->getPointerOperand();
    Value *StorePtr = Store->getPointerOperand();
    Type *LoadPtrType = LoadPtr->getType();
    Type *LoadType = LoadPtrType->getPointerElementType();

    assert(LoadPtrType->getPointerAddressSpace() ==
               StorePtr->getType()->getPointerAddressSpace() &&
           LoadType == StorePtr->getType()->getPointerElementType() &&
           "Should be a known dependence");

    // Currently we only support accesses with unit stride. FIXME: we should
    // be able to handle non-unit strides as well, as long as the stride is
    // equal to the dependence distance.
    if (getPtrStride(PSE, LoadPtr, L) != 1 ||
        getPtrStride(PSE, StorePtr, L) != 1)
      return false;

    auto &DL = Load->getParent()->getModule()->getDataLayout();
    unsigned TypeByteSize = DL.getTypeAllocSize(LoadType);
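
    // Illustrative example (assuming i32 elements): for a load of A[i] and a
    // store to A[i+1], TypeByteSize is 4, and the difference of the pointer
    // SCEVs computed below is the constant 4, i.e. exactly one element, so
    // the dependence distance is one iteration.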
    auto *LoadPtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(LoadPtr));
    auto *StorePtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(StorePtr));

    // We don't need to check non-wrapping here because a forward/backward
    // dependence wouldn't be valid if these weren't monotonic accesses.
    auto *Dist = cast<SCEVConstant>(
        PSE.getSE()->getMinusSCEV(StorePtrSCEV, LoadPtrSCEV));
    const APInt &Val = Dist->getAPInt();
    return Val == TypeByteSize;
  }
  Value *getLoadPtr() const { return Load->getPointerOperand(); }

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &OS,
                                 const StoreToLoadForwardingCandidate &Cand) {
    OS << *Cand.Store << " -->\n";
    OS.indent(2) << *Cand.Load << "\n";
    return OS;
  }
#endif
};

/// \brief Check if the store dominates all latches, so as long as there is no
/// intervening store this value will be loaded in the next iteration.
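/// (If the store did not dominate a latch, an iteration could reach the
/// backedge without storing, leaving the forwarded value stale for the next
/// iteration's load.)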
static bool doesStoreDominateAllLatches(BasicBlock *StoreBlock, Loop *L,
                                        DominatorTree *DT) {
  SmallVector<BasicBlock *, 8> Latches;
  L->getLoopLatches(Latches);
  return llvm::all_of(Latches, [&](const BasicBlock *Latch) {
    return DT->dominates(StoreBlock, Latch);
  });
}

/// \brief Return true if the load is not executed on all paths in the loop.
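/// (Illustratively, a load guarded by "if (c[i])" inside the loop body is
/// conditional: it is not executed on every path through an iteration.)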
static bool isLoadConditional(LoadInst *Load, Loop *L) {
  return Load->getParent() != L->getHeader();
}

/// \brief The per-loop class that does most of the work.
class LoadEliminationForLoop {
public:
  LoadEliminationForLoop(Loop *L, LoopInfo *LI, const LoopAccessInfo &LAI,
                         DominatorTree *DT)
      : L(L), LI(LI), LAI(LAI), DT(DT), PSE(LAI.getPSE()) {}
  /// \brief Look through the loop-carried and loop-independent dependences in
  /// this loop and find store->load dependences.
  ///
  /// Note that no candidate is returned if LAA has failed to analyze the loop
  /// (e.g. if it's not bottom-tested, contains volatile memops, etc.)
  std::forward_list<StoreToLoadForwardingCandidate>
  findStoreToLoadDependences(const LoopAccessInfo &LAI) {
    std::forward_list<StoreToLoadForwardingCandidate> Candidates;

    const auto *Deps = LAI.getDepChecker().getDependences();
    if (!Deps)
      return Candidates;
    // Find store->load dependences (these are consequently true dependences).
    // Both lexically forward and backward dependences qualify. Disqualify
    // loads that have other unknown dependences.
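    // E.g. (illustrative): with the store lexically first, "A[i+1] = x;
    // y = A[i];" is a lexically forward dependence; with the load first,
    // "y = A[i]; A[i+1] = x;" is lexically backward. Both forward the stored
    // value to the load one iteration later.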

    SmallSet<Instruction *, 4> LoadsWithUnknownDependence;

    for (const auto &Dep : *Deps) {
      Instruction *Source = Dep.getSource(LAI);
      Instruction *Destination = Dep.getDestination(LAI);

      if (Dep.Type == MemoryDepChecker::Dependence::Unknown) {
        if (isa<LoadInst>(Source))
          LoadsWithUnknownDependence.insert(Source);
        if (isa<LoadInst>(Destination))
          LoadsWithUnknownDependence.insert(Destination);
        continue;
      }
      if (Dep.isBackward())
        // Note that the designations source and destination follow the program
        // order, i.e. source is always first. (The direction is given by the
        // DepType.)
        std::swap(Source, Destination);
      else
        assert(Dep.isForward() && "Needs to be a forward dependence");

      auto *Store = dyn_cast<StoreInst>(Source);
      if (!Store)
        continue;

      auto *Load = dyn_cast<LoadInst>(Destination);
      if (!Load)
        continue;
      // Only propagate the value if the stored and loaded values are of the
      // same type.
      if (Store->getPointerOperandType() != Load->getPointerOperandType())
        continue;

      Candidates.emplace_front(Load, Store);
    }
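
    // A load that additionally has an unknown dependence may be clobbered by
    // an access that LAA could not analyze, so it is not safe to forward a
    // store to it.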
    if (!LoadsWithUnknownDependence.empty())
      Candidates.remove_if([&](const StoreToLoadForwardingCandidate &C) {
        return LoadsWithUnknownDependence.count(C.Load);
      });

    return Candidates;
  }
  /// \brief Return the index of the instruction according to program order.
  unsigned getInstrIndex(Instruction *Inst) {
    auto I = InstOrder.find(Inst);
    assert(I != InstOrder.end() && "No index for instruction");
    return I->second;
  }
  /// \brief If a load has multiple candidates associated (i.e. different
  /// stores), it means that it could be forwarding from multiple stores
  /// depending on control flow. Remove these candidates.
  ///
  /// Here, we rely on LAA to include the relevant loop-independent dependences.
  /// LAA is known to omit these in the very simple case when the read and the
  /// write within an alias set always take place using the *same* pointer.
  ///
  /// However, we know that this is not the case here, i.e. we can rely on LAA
  /// to provide us with loop-independent dependences for the cases we're
  /// interested in. Consider for example the case where a loop-independent
  /// dependence S1->S2 invalidates the forwarding S3->S2.
  ///
  ///   A[i]   = ...   (S1)
  ///   ...    = A[i]  (S2)
  ///   A[i+1] = ...   (S3)
  ///
  /// LAA will perform dependence analysis here because there are two
  /// *different* pointers involved in the same alias set (&A[i] and &A[i+1]).
  void removeDependencesFromMultipleStores(
      std::forward_list<StoreToLoadForwardingCandidate> &Candidates) {
    // If Store is nullptr it means that we have multiple stores forwarding to
    // this load (the destination).
    typedef DenseMap<LoadInst *, const StoreToLoadForwardingCandidate *>
        LoadToSingleCandT;
    LoadToSingleCandT LoadToSingleCand;

    for (const auto &Cand : Candidates) {
      bool NewElt;
      LoadToSingleCandT::iterator Iter;

      std::tie(Iter, NewElt) =
          LoadToSingleCand.insert(std::make_pair(Cand.Load, &Cand));
      if (!NewElt) {
        const StoreToLoadForwardingCandidate *&OtherCand = Iter->second;
        // Already multiple stores forward to this load.
        if (OtherCand == nullptr)
          continue;

        // Handle the very basic case when the two stores are in the same
        // block so deciding which one forwards is easy. The later one forwards
        // as long as they both have a dependence distance of one to the load.
        if (Cand.Store->getParent() == OtherCand->Store->getParent() &&
            Cand.isDependenceDistanceOfOne(PSE, L) &&
            OtherCand->isDependenceDistanceOfOne(PSE, L)) {
          // They are in the same block; the later one will forward to the
          // load.
          if (getInstrIndex(OtherCand->Store) < getInstrIndex(Cand.Store))
            OtherCand = &Cand;
        } else
          OtherCand = nullptr;
      }
    }
    Candidates.remove_if([&](const StoreToLoadForwardingCandidate &Cand) {
      if (LoadToSingleCand[Cand.Load] != &Cand) {
        DEBUG(dbgs() << "Removing from candidates: \n"
                     << Cand
                     << "  The load may have multiple stores forwarding to "
                     << "it\n");
        return true;
      }
      return false;
    });
  }
  /// \brief Given two pointer operations identified by their
  /// RuntimePointerChecking indices, return true if they require an alias
  /// check.
  ///
  /// We need a check if one is a pointer for a candidate load and the other is
  /// a pointer for a possibly intervening store.
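  /// (Illustratively: a check is needed between the pointer of a store to
  /// B[i] on the forwarding path and the pointer of a candidate load A[i],
  /// but not between the pointers of two intervening stores.)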
  bool needsChecking(unsigned PtrIdx1, unsigned PtrIdx2,
                     const SmallSet<Value *, 4> &PtrsWrittenOnFwdingPath,
                     const std::set<Value *> &CandLoadPtrs) {
    Value *Ptr1 =
        LAI.getRuntimePointerChecking()->getPointerInfo(PtrIdx1).PointerValue;
    Value *Ptr2 =
        LAI.getRuntimePointerChecking()->getPointerInfo(PtrIdx2).PointerValue;
    return ((PtrsWrittenOnFwdingPath.count(Ptr1) && CandLoadPtrs.count(Ptr2)) ||
            (PtrsWrittenOnFwdingPath.count(Ptr2) && CandLoadPtrs.count(Ptr1)));
  }
  /// \brief Return pointers that are possibly written to on the path from a
  /// forwarding store to a load.
  ///
  /// These pointers need to be alias-checked against the forwarding candidates.
  SmallSet<Value *, 4> findPointersWrittenOnForwardingPath(
      const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) {
    // From FirstStore to LastLoad neither of the elimination candidate loads
    // should overlap with any of the stores.
    //
    // E.g.:
    //
    // st1 C[i]
    // ld1 B[i] <-------,
    // ld0 A[i] <----,  |              * LastLoad
    // ...           |  |
    // st2 E[i]      |  |
    // st3 B[i+1] -- | -'              * FirstStore
    // st0 A[i+1] -- '
    // st4 D[i]
    //
    // st0 forwards to ld0 if the accesses in st4 and st1 don't overlap with
    // st3 and st0.
    LoadInst *LastLoad =
        std::max_element(Candidates.begin(), Candidates.end(),
                         [&](const StoreToLoadForwardingCandidate &A,
                             const StoreToLoadForwardingCandidate &B) {
                           return getInstrIndex(A.Load) < getInstrIndex(B.Load);
                         })
            ->Load;
    StoreInst *FirstStore =
        std::min_element(Candidates.begin(), Candidates.end(),
                         [&](const StoreToLoadForwardingCandidate &A,
                             const StoreToLoadForwardingCandidate &B) {
                           return getInstrIndex(A.Store) <
                                  getInstrIndex(B.Store);
                         })
            ->Store;
    // We're looking for stores after the first forwarding store until the end
    // of the loop, then from the beginning of the loop until the last
    // forwarded-to load. Collect the pointers for those stores.
    SmallSet<Value *, 4> PtrsWrittenOnFwdingPath;

    auto InsertStorePtr = [&](Instruction *I) {
      if (auto *S = dyn_cast<StoreInst>(I))
        PtrsWrittenOnFwdingPath.insert(S->getPointerOperand());
    };
    const auto &MemInstrs = LAI.getDepChecker().getMemoryInstructions();
    std::for_each(MemInstrs.begin() + getInstrIndex(FirstStore) + 1,
                  MemInstrs.end(), InsertStorePtr);
    std::for_each(MemInstrs.begin(), &MemInstrs[getInstrIndex(LastLoad)],
                  InsertStorePtr);

    return PtrsWrittenOnFwdingPath;
  }
  /// \brief Determine the pointer alias checks to prove that there are no
  /// intervening stores.
  SmallVector<RuntimePointerChecking::PointerCheck, 4> collectMemchecks(
      const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) {
    SmallSet<Value *, 4> PtrsWrittenOnFwdingPath =
        findPointersWrittenOnForwardingPath(Candidates);

    // Collect the pointers of the candidate loads.
    // FIXME: SmallSet does not work with std::inserter.
    std::set<Value *> CandLoadPtrs;
    transform(Candidates,
              std::inserter(CandLoadPtrs, CandLoadPtrs.begin()),
              std::mem_fn(&StoreToLoadForwardingCandidate::getLoadPtr));

    const auto &AllChecks = LAI.getRuntimePointerChecking()->getChecks();
    SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks;

    copy_if(AllChecks, std::back_inserter(Checks),
            [&](const RuntimePointerChecking::PointerCheck &Check) {
              for (auto PtrIdx1 : Check.first->Members)
                for (auto PtrIdx2 : Check.second->Members)
                  if (needsChecking(PtrIdx1, PtrIdx2, PtrsWrittenOnFwdingPath,
                                    CandLoadPtrs))
                    return true;
              return false;
            });

    DEBUG(dbgs() << "\nPointer Checks (count: " << Checks.size() << "):\n");
    DEBUG(LAI.getRuntimePointerChecking()->printChecks(dbgs(), Checks));

    return Checks;
  }
  /// \brief Perform the transformation for a candidate.
  void
  propagateStoredValueToLoadUsers(const StoreToLoadForwardingCandidate &Cand,
                                  SCEVExpander &SEE) {
    // loop:
    //      %x = load %gep_i
    //         = ... %x
    //      store %y, %gep_i_plus_1
    //
    // =>
    //
    // ph:
    //      %x.initial = load %gep_0
    // loop:
    //      %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
    //      %x = load %gep_i            <---- now dead
    //         = ... %x.storeforward
    //      store %y, %gep_i_plus_1
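    //
    // The preheader load supplies the value for iteration 0; from then on the
    // PHI carries the value stored on the previous iteration around the
    // backedge.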
    Value *Ptr = Cand.Load->getPointerOperand();
    auto *PtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(Ptr));
    auto *PH = L->getLoopPreheader();
    Value *InitialPtr = SEE.expandCodeFor(PtrSCEV->getStart(), Ptr->getType(),
                                          PH->getTerminator());
    Value *Initial =
        new LoadInst(InitialPtr, "load_initial", /* isVolatile */ false,
                     Cand.Load->getAlignment(), PH->getTerminator());

    PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
                                   &L->getHeader()->front());
    PHI->addIncoming(Initial, PH);
    PHI->addIncoming(Cand.Store->getOperand(0), L->getLoopLatch());

    Cand.Load->replaceAllUsesWith(PHI);
  }
  /// \brief Top-level driver for each loop: find store->load forwarding
  /// candidates, add run-time checks and perform transformation.
  bool processLoop() {
    DEBUG(dbgs() << "\nIn \"" << L->getHeader()->getParent()->getName()
                 << "\" checking " << *L << "\n");

    // Look for store-to-load forwarding cases across the
    // backedge. E.g.:
    //
    // loop:
    //      %x = load %gep_i
    //         = ... %x
    //      store %y, %gep_i_plus_1
    //
    // =>
    //
    // ph:
    //      %x.initial = load %gep_0
    // loop:
    //      %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
    //      %x = load %gep_i            <---- now dead
    //         = ... %x.storeforward
    //      store %y, %gep_i_plus_1
    // First start with store->load dependences.
    auto StoreToLoadDependences = findStoreToLoadDependences(LAI);
    if (StoreToLoadDependences.empty())
      return false;

    // Generate an index for each load and store according to the original
    // program order. This will be used later.
    InstOrder = LAI.getDepChecker().generateInstructionOrderMap();

    // To keep things simple for now, remove those where the load is
    // potentially fed by multiple stores.
    removeDependencesFromMultipleStores(StoreToLoadDependences);
    if (StoreToLoadDependences.empty())
      return false;

    // Filter the candidates further.
    SmallVector<StoreToLoadForwardingCandidate, 4> Candidates;
    unsigned NumForwarding = 0;
    for (const StoreToLoadForwardingCandidate &Cand : StoreToLoadDependences) {
      DEBUG(dbgs() << "Candidate " << Cand);

      // Make sure that the stored value is available everywhere in the loop
      // in the next iteration.
      if (!doesStoreDominateAllLatches(Cand.Store->getParent(), L, DT))
        continue;

      // If the load is conditional we can't hoist its 0-iteration instance to
      // the preheader because that would make it unconditional. Thus we would
      // access a memory location that the original loop did not access.
      if (isLoadConditional(Cand.Load, L))
        continue;

      // Check whether the SCEV difference is the same as the induction step,
      // so we load the value in the next iteration.
      if (!Cand.isDependenceDistanceOfOne(PSE, L))
        continue;

      ++NumForwarding;
      DEBUG(dbgs()
            << NumForwarding
            << ". Valid store-to-load forwarding across the loop backedge\n");
      Candidates.push_back(Cand);
    }
    if (Candidates.empty())
      return false;

    // Check intervening may-alias stores. These need runtime checks for alias
    // disambiguation.
    SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks =
        collectMemchecks(Candidates);
    // Too many checks are likely to outweigh the benefits of forwarding.
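    // (E.g. with the default CheckPerElim of 1, eliminating two loads
    // justifies at most two runtime checks.)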
    if (Checks.size() > Candidates.size() * CheckPerElim) {
      DEBUG(dbgs() << "Too many run-time checks needed.\n");
      return false;
    }
    if (LAI.getPSE().getUnionPredicate().getComplexity() >
        LoadElimSCEVCheckThreshold) {
      DEBUG(dbgs() << "Too many SCEV run-time checks needed.\n");
      return false;
    }
    if (!Checks.empty() || !LAI.getPSE().getUnionPredicate().isAlwaysTrue()) {
      if (L->getHeader()->getParent()->optForSize()) {
        DEBUG(dbgs() << "Versioning is needed but not allowed when optimizing "
                        "for size.\n");
        return false;
      }

      if (!L->isLoopSimplifyForm()) {
        DEBUG(dbgs() << "Loop is not in loop-simplify form.\n");
        return false;
      }

      // Point of no-return, start the transformation. First, version the loop
      // if necessary.

      LoopVersioning LV(LAI, L, LI, DT, PSE.getSE(), false);
      LV.setAliasChecks(std::move(Checks));
      LV.setSCEVChecks(LAI.getPSE().getUnionPredicate());
      LV.versionLoop();
    }

    // Next, propagate the value stored by the store to the users of the load.
    // Also for the first iteration, generate the initial value of the load.
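    // (SEE expands the start of the load pointer's SCEV AddRec, e.g. &A[0]
    // for an illustrative access A[i], at the preheader terminator; see
    // propagateStoredValueToLoadUsers above.)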
    SCEVExpander SEE(*PSE.getSE(), L->getHeader()->getModule()->getDataLayout(),
                     "storeforward");
    for (const auto &Cand : Candidates)
      propagateStoredValueToLoadUsers(Cand, SEE);
    NumLoopLoadEliminated += NumForwarding;

    return true;
  }
private:
  Loop *L;

  /// \brief Maps the load/store instructions to their index according to
  /// program order.
  DenseMap<Instruction *, unsigned> InstOrder;

  // Analyses used.
  LoopInfo *LI;
  const LoopAccessInfo &LAI;
  DominatorTree *DT;
  PredicatedScalarEvolution PSE;
};

static bool
eliminateLoadsAcrossLoops(Function &F, LoopInfo &LI, DominatorTree &DT,
                          function_ref<const LoopAccessInfo &(Loop &)> GetLAI) {
  // Build up a worklist of inner-loops to transform to avoid iterator
  // invalidation.
  // FIXME: This logic comes from other passes that actually change the loop
  // nest structure. It isn't clear this is necessary (or useful) for a pass
  // which merely optimizes the use of loads in a loop.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *TopLevelLoop : LI)
    for (Loop *L : depth_first(TopLevelLoop))
      // We only handle inner-most loops.
      if (L->empty())
        Worklist.push_back(L);
  // Now walk the identified inner loops.
  bool Changed = false;
  for (Loop *L : Worklist) {
    // The actual work is performed by LoadEliminationForLoop.
    LoadEliminationForLoop LEL(L, &LI, GetLAI(*L), &DT);
    Changed |= LEL.processLoop();
  }

  return Changed;
}

/// \brief The pass. Most of the work is delegated to the per-loop
/// LoadEliminationForLoop class.
class LoopLoadElimination : public FunctionPass {
public:
  LoopLoadElimination() : FunctionPass(ID) {
    initializeLoopLoadEliminationPass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto &LAA = getAnalysis<LoopAccessLegacyAnalysis>();
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();

    // Process each loop nest in the function.
    return eliminateLoadsAcrossLoops(
        F, LI, DT,
        [&LAA](Loop &L) -> const LoopAccessInfo & { return LAA.getInfo(&L); });
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequiredID(LoopSimplifyID);
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
  }

  static char ID;
};

} // end anonymous namespace
char LoopLoadElimination::ID;

static const char LLE_name[] = "Loop Load Elimination";
INITIALIZE_PASS_BEGIN(LoopLoadElimination, LLE_OPTION, LLE_name, false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(LoopLoadElimination, LLE_OPTION, LLE_name, false, false)
namespace llvm {

FunctionPass *createLoopLoadEliminationPass() {
  return new LoopLoadElimination();
}
PreservedAnalyses LoopLoadEliminationPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  bool Changed = eliminateLoadsAcrossLoops(
      F, LI, DT, [&](Loop &L) -> const LoopAccessInfo & {
        LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI};
        return LAM.getResult<LoopAccessAnalysis>(L, AR);
      });

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  return PA;
}

} // end namespace llvm