//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible.  It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe.  This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops.  If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop.  This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer.  There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable.  We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
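//
// For illustration, a minimal sketch (not from the test suite) of what both
// transformations do: given
//
//   loop:
//     %v = load i32, i32* @g        ; @g is never written to in the loop
//     %a = add i32 %v, 1
//     store i32 %a, i32* %p         ; %p is loop invariant and must-aliased
//     ...
//
// the load of @g can be hoisted into the preheader, and scalar promotion can
// keep the value stored through %p in a register for the whole loop, writing
// it back once in each exit block.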
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "licm"
STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromoted, "Number of memory locations promoted to registers");
/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));
static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));
static cl::opt<uint32_t> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));
// Default value of zero implies we use the regular alias set tracker mechanism
// instead of the cross product using AA to identify aliasing of the memory
// location we are interested in.
static cl::opt<int>
    LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0),
                   cl::desc("How many instructions to cross-product using AA"));
// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile. This is to be removed if MemorySSA starts to
// address the same issue. This flag applies only when LICM uses MemorySSA
// instead of AliasSetTracker. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));
// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, then this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(Instruction &Inst,
                                           const DominatorTree *DT,
                                           const Loop *CurLoop,
                                           const LoopSafetyInfo *SafetyInfo,
                                           OptimizationRemarkEmitter *ORE,
                                           const Instruction *CtxI = nullptr);
static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AAResults *AA);
static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                             Loop *CurLoop,
                                             SinkAndHoistLICMFlags &Flags);
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU);

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE);
namespace {
struct LoopInvariantCodeMotion {
  bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
                 TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
                 ScalarEvolution *SE, MemorySSA *MSSA,
                 OptimizationRemarkEmitter *ORE);

  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}

private:
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;

  std::unique_ptr<AliasSetTracker>
  collectAliasInfoForLoop(Loop *L, LoopInfo *LI, AAResults *AA);
  std::unique_ptr<AliasSetTracker>
  collectAliasInfoForLoopWithMSSA(Loop *L, AAResults *AA,
                                  MemorySSAUpdater *MSSAU);
};
struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }
  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = EnableMSSALoopDependency
                          ? (&getAnalysis<MemorySSAWrapperPass>().getMSSA())
                          : nullptr;
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass.  Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(L,
                          &getAnalysis<AAResultsWrapperPass>().getAAResults(),
                          &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
                          &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
                          &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
                              *L->getHeader()->getParent()),
                          &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
                              *L->getHeader()->getParent()),
                          SE ? &SE->getSE() : nullptr, MSSA, &ORE);
  }
  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG...
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (EnableMSSALoopDependency) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }

private:
  LoopInvariantCodeMotion LICM;
};
} // namespace
PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass.  Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.TLI, &AR.TTI, &AR.SE,
                      AR.MSSA, &ORE))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();

  return PA;
}
char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
                           unsigned LicmMssaNoAccForPromotionCap) {
  return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
}
/// Hoist expressions out of the specified loop.  Note, alias info for inner
/// loop is not preserved so it is not a good idea to run LICM multiple
/// times on one loop.
bool LoopInvariantCodeMotion::runOnLoop(
    Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
    TargetLibraryInfo *TLI, TargetTransformInfo *TTI, ScalarEvolution *SE,
    MemorySSA *MSSA, OptimizationRemarkEmitter *ORE) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  // If this loop has metadata indicating that LICM is not to be performed then
  // just exit.
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }
  std::unique_ptr<AliasSetTracker> CurAST;
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  bool NoOfMemAccTooLarge = false;
  unsigned LicmMssaOptCounter = 0;

  if (!MSSA) {
    LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n");
    CurAST = collectAliasInfoForLoop(L, LI, AA);
  } else {
    LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA.\n");
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);

    unsigned AccessCapCount = 0;
    for (auto *BB : L->getBlocks()) {
      if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
        for (const auto &MA : *Accesses) {
          (void)MA;
          ++AccessCapCount;
          if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
            NoOfMemAccTooLarge = true;
            break;
          }
        }
      }
      if (NoOfMemAccTooLarge)
        break;
    }
  }
  // Get the preheader block to move instructions into...
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(L);
  // We want to visit all of the instructions in this loop... that are not parts
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses.  This allows
  // us to sink instructions in one pass, without iteration.  After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  SinkAndHoistLICMFlags Flags = {NoOfMemAccTooLarge, LicmMssaOptCounter,
                                 LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                                 /*IsSink=*/true};
  if (L->hasDedicatedExits())
    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                          CurAST.get(), MSSAU.get(), &SafetyInfo, Flags, ORE);
  Flags.IsSink = false;
  if (Preheader)
    Changed |=
        hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, L,
                    CurAST.get(), MSSAU.get(), SE, &SafetyInfo, Flags, ORE);
  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify,
  // make sure we catch that. An additional load may be generated in the
  // preheader for SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !NoOfMemAccTooLarge) {
    // Figure out the loop exits and their insertion points
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<Instruction *, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      if (MSSAU)
        MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
        if (MSSAU)
          MSSAInsertPts.push_back(nullptr);
      }
      PredIteratorCache PIC;

      bool Promoted = false;

      // Build an AST using MSSA.
      if (MSSAU)
        CurAST = collectAliasInfoForLoopWithMSSA(L, AA, MSSAU.get());

      // Loop over all of the alias sets in the tracker object.
      for (AliasSet &AS : *CurAST) {
        // We can promote this alias set if it has a store, if it is a "Must"
        // alias set, if the pointer is loop invariant, and if we are not
        // eliminating any volatile loads or stores.
        if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
            !L->isLoopInvariant(AS.begin()->getValue()))
          continue;

        assert(!AS.empty() &&
               "Must alias set should have at least one pointer element in it!");

        SmallSetVector<Value *, 8> PointerMustAliases;
        for (const auto &ASI : AS)
          PointerMustAliases.insert(ASI.getValue());

        Promoted |= promoteLoopAccessesToScalars(
            PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
            DT, TLI, L, CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
      }
      // Once we have promoted values across the loop body we have to
      // recursively reform LCSSA as any nested loop may now have values defined
      // within the loop used in the outer loop.
      // FIXME: This is really heavy handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and reformed
      // it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of sanity checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((!L->getParentLoop() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  if (MSSAU.get() && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  if (Changed && SE)
    SE->forgetLoopDispositions(L);
  return Changed;
}
/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree.  This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                      DominatorTree *DT, TargetLibraryInfo *TLI,
                      TargetTransformInfo *TTI, Loop *CurLoop,
                      AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
                      ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");
  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);
  bool Changed = false;
  for (DomTreeNode *DTN : reverse(Worklist)) {
    BasicBlock *BB = DTN->getBlock();
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // If the instruction is dead, we would try to sink it because it isn't
      // used in the loop; instead, just delete it.
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageKnowledge(&I);
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop.  We can do this if all of the users of the instruction
      // are outside of the loop.  In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
      bool FreeInLoop = false;
      if (!I.mayHaveSideEffects() &&
          isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
                             ORE)) {
        if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FreeInLoop) {
            ++II;
            salvageDebugInfo(I);
            eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  return Changed;
}
namespace {
// This is a helper class for hoistRegion to make it able to hoist control flow
// in order to be able to hoist phis. The way this works is that we initially
// start hoisting to the loop preheader, and when we see a loop invariant branch
// we make note of this. When we then come to hoist an instruction that's
// conditional on such a branch we duplicate the branch and the relevant control
// flow, then hoist the instruction into the block corresponding to its original
// block in the duplicated control flow.
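//
// A minimal sketch (block names illustrative): for a diamond inside the loop
//
//   guard:   br i1 %invcond, label %then, label %else
//   then:    %a = add i32 %x, 1     ; %x loop invariant
//   else:    %b = mul i32 %x, 2
//   merge:   %p = phi i32 [ %a, %then ], [ %b, %else ]
//
// the branch on %invcond is cloned above the loop, creating then.licm,
// else.licm and a new preheader, so %a, %b and the phi %p can all be hoisted
// into the corresponding cloned blocks.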
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater *MSSAU;

  // A map of blocks in the loop to the block their instructions will be hoisted
  // to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater *MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as it's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the other,
    // or if both have a common successor, which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one pick whichever appears first in the block list
      // (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = std::find_if(F->begin(), F->end(), IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch so any phi we hoist would be controlled by the
    // wrong condition. This also takes care of avoiding hoisting of loop back
    // edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }
  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
    BasicBlock *BB = PN->getParent();
    for (BasicBlock *PredBB : predecessors(BB))
      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block, which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on whether the
        // branch is triangle-like or diamond-like.
        if (Pair.first->getSuccessor(0) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        } else if (Pair.first->getSuccessor(1) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
        } else {
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        }
      }
    }
    // PredecessorBlocks will now be empty if for every predecessor of BB we
    // found a hoistable branch source.
    return PredecessorBlocks.empty();
  }
  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
    if (!ControlFlowHoisting)
      return CurLoop->getLoopPreheader();
    // If BB has already been hoisted, return that
    if (HoistDestinationMap.count(BB))
      return HoistDestinationMap[BB];

    // Check if this block is conditional based on a pending branch
    auto HasBBAsSuccessor =
        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
                                       Pair.first->getSuccessor(1) == BB);
        };
    auto It = std::find_if(HoistableBranches.begin(), HoistableBranches.end(),
                           HasBBAsSuccessor);

    // If not involved in a pending branch, hoist to preheader
    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
    if (It == HoistableBranches.end()) {
      LLVM_DEBUG(dbgs() << "LICM using " << InitialPreheader->getName()
                        << " as hoist destination for " << BB->getName()
                        << "\n");
      HoistDestinationMap[BB] = InitialPreheader;
      return InitialPreheader;
    }

    BranchInst *BI = It->first;
    assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
               HoistableBranches.end() &&
           "BB is expected to be the target of at most one branch");

    LLVMContext &C = BB->getContext();
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    BasicBlock *CommonSucc = HoistableBranches[BI];
    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());

    // Create hoisted versions of blocks that currently don't have them
    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
      if (HoistDestinationMap.count(Orig))
        return HoistDestinationMap[Orig];
      BasicBlock *New =
          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
      HoistDestinationMap[Orig] = New;
      DT->addNewBlock(New, HoistTarget);
      if (CurLoop->getParentLoop())
        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
      ++NumCreatedBlocks;
      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
                        << " as hoist destination for " << Orig->getName()
                        << "\n");
      return New;
    };
    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);

    // Link up these blocks with branches.
    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever that
      // hoist target branched to.
      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
      assert(TargetSucc && "Expected hoist target to have a single successor");
      HoistCommonSucc->moveBefore(TargetSucc);
      BranchInst::Create(TargetSucc, HoistCommonSucc);
    }
    if (!HoistTrueDest->getTerminator()) {
      HoistTrueDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
    }
    if (!HoistFalseDest->getTerminator()) {
      HoistFalseDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
    }

    // If BI is being cloned to what was originally the preheader then
    // HoistCommonSucc will now be the new preheader.
    if (HoistTarget == InitialPreheader) {
      // Phis in the loop header now need to use the new preheader.
      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
      if (MSSAU)
        MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
            HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
      // The new preheader dominates the loop header.
      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
      // The preheader hoist destination is now the new preheader, with the
      // exception of the hoist destination of this branch.
      for (auto &Pair : HoistDestinationMap)
        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
          Pair.second = HoistCommonSucc;
    }

    // Now finally clone BI.
    ReplaceInstWithInst(
        HoistTarget->getTerminator(),
        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
    ++NumClonedBranches;

    assert(CurLoop->getLoopPreheader() &&
           "Hoisting blocks should not have destroyed preheader");
    return HoistDestinationMap[BB];
  }
};
} // namespace
/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree.  This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                       DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop,
                       AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
                       ScalarEvolution *SE, ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");
  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
      Instruction &I = *II++;
      // Try constant folding this instruction.  If all the operands are
      // constants, it is technically hoistable, but it would be better to
      // just fold it.
      if (Constant *C = ConstantFoldInstruction(
              &I, I.getModule()->getDataLayout(), TLI)) {
        LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << "  --> " << *C
                          << '\n');
        if (CurAST)
          CurAST->copyValue(&I, C);
        // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
        I.replaceAllUsesWith(C);
        if (isInstructionTriviallyDead(&I, TLI))
          eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
        Changed = true;
        continue;
      }
      // Try hoisting the instruction out to the preheader.  We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction.
      // TODO: It may be safe to hoist if we are hoisting to a conditional block
      // and we have accurately duplicated the control flow from the loop header
      // to that block.
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
                             ORE) &&
          isSafeToExecuteUnconditionally(
              I, DT, CurLoop, SafetyInfo, ORE,
              CurLoop->getLoopPreheader()->getTerminator())) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }
      // Attempt to remove floating point division out of the loop by
      // converting it to a reciprocal multiplication.
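      // A minimal sketch of this rewrite (names illustrative): with a
      // loop-invariant divisor %d,
      //   %q = fdiv arcp float %x, %d
      // becomes
      //   %div = fdiv arcp float 1.0, %d    ; hoisted to the preheader
      //   %q   = fmul arcp float %x, %div   ; remains in the loop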
      if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
          CurLoop->isLoopInvariant(I.getOperand(1))) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(&I);

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(&I);
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);

        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, SE, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }
      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }
      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
            PN->setIncomingBlock(
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, SE, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }
  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and also keep track of where in the block we are rehoisting to, to make
  // sure that we rehoist instructions before the instructions that use them.
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getName()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}
// Return true if LI is invariant within scope of the loop. LI is invariant if
// CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and also the
// invariant.start has no uses.
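//
// A minimal sketch of the pattern being matched (names illustrative):
//   %ptr = bitcast i32* %p to i8*
//   call {}* @llvm.invariant.start.p0i8(i64 4, i8* %ptr) ; dominates the loop
// loop:
//   %v = load i32, i32* %p   ; may be treated as loop invariant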
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {
  Value *Addr = LI->getOperand(0);
  const DataLayout &DL = LI->getModule()->getDataLayout();
  const uint32_t LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // if the type is i8 addrspace(x)*, we know this is the type of
  // llvm.invariant.start operand
  auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
                                     LI->getPointerAddressSpace());
  unsigned BitcastsVisited = 0;
  // Look through bitcasts until we reach the i8* type (this is invariant.start
  // operand type).
  while (Addr->getType() != PtrInt8Ty) {
    auto *BC = dyn_cast<BitCastInst>(Addr);
    // Avoid traversing high number of bitcast uses.
    if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
      return false;
    Addr = BC->getOperand(0);
  }

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing for Load operand with high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      break;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    unsigned InvariantSizeInBits =
        cast<ConstantInt>(II->getArgOperand(0))->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand size
    // in bits. Also, the invariant.start should dominate the load, and we
    // should not hoist the load out of a loop that contains this dominating
    // invariant.start.
    if (LocSizeInBits <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}
namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop.  Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {
  // Only these instructions are hoistable/sinkable.
  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
          isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
          isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
          isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
          isa<InsertValueInst>(I) || isa<FreezeInst>(I));
}
/// Return true if all of the alias sets within this AST are known not to
/// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
                const Loop *L) {
  if (CurAST) {
    for (AliasSet &AS : *CurAST) {
      if (!AS.isForwardingAliasSet() && AS.isMod()) {
        return false;
      }
    }
    return true;
  } else { /*MSSAU*/
    for (auto *BB : L->getBlocks())
      if (MSSAU->getMemorySSA()->getBlockDefs(BB))
        return false;
    return true;
  }
}
/// Return true if I is the only Instruction with a MemoryAccess in L.
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater *MSSAU) {
  for (auto *BB : L->getBlocks())
    if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
      int NotAPhi = 0;
      for (const auto &Acc : *Accs) {
        if (isa<MemoryPhi>(&Acc))
          continue;
        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
          return false;
      }
    }
  return true;
}
} // namespace
bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                              Loop *CurLoop, AliasSetTracker *CurAST,
                              MemorySSAUpdater *MSSAU,
                              bool TargetExecutesOncePerLoop,
                              SinkAndHoistLICMFlags *Flags,
                              OptimizationRemarkEmitter *ORE) {
  // If we don't understand the instruction, bail early.
  if (!isHoistableAndSinkableInst(I))
    return false;

  MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
  if (MSSA)
    assert(Flags != nullptr && "Flags cannot be null.");
  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (AA->pointsToConstantMemory(LI->getOperand(0)))
      return true;
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
      return false; // Don't risk duplicating unordered loads

    // This checks for an invariant.start dominating the load.
    if (isLoadInvariantInLoop(LI, DT, CurLoop))
      return true;

    bool Invalidated;
    if (CurAST)
      Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
                                             CurLoop, AA);
    else
      Invalidated = pointerInvalidatedByLoopWithMSSA(
          MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, *Flags);
    // Check loop-invariant address because this may also be a sinkable load
    // whose address is not necessarily loop-invariant.
    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
               << "failed to move load with loop-invariant address "
                  "because the loop may invalidate its value";
      });

    return !Invalidated;
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

    using namespace PatternMatch;
    if (match(CI, m_Intrinsic<Intrinsic::assume>()))
      // Assumes don't actually alias anything or throw
      return true;

    if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
      // Widenable conditions don't actually alias anything or throw
      return true;

    // Handle simple cases by querying alias analysis.
    FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
    if (Behavior == FMRB_DoesNotAccessMemory)
      return true;
    if (AAResults::onlyReadsMemory(Behavior)) {
      // A readonly argmemonly function only reads from memory pointed to by
      // its arguments with arbitrary offsets.  If we can prove there are no
      // writes to this memory in the loop, we can hoist or sink.
      if (AAResults::onlyAccessesArgPointees(Behavior)) {
        // TODO: expand to writeable arguments
        for (Value *Op : CI->arg_operands())
          if (Op->getType()->isPointerTy()) {
            bool Invalidated;
            if (CurAST)
              Invalidated = pointerInvalidatedByLoop(
                  MemoryLocation(Op, LocationSize::unknown(), AAMDNodes()),
                  CurAST, CurLoop, AA);
            else
              Invalidated = pointerInvalidatedByLoopWithMSSA(
                  MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop,
                  *Flags);
            if (Invalidated)
              return false;
          }
        return true;
      }

      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      if (isReadOnly(CurAST, MSSAU, CurLoop))
        return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.

    return false;
  } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
    // Fences alias (most) everything to provide ordering.  For the moment,
    // just give up if there are any other memory operations in the loop.
    if (CurAST) {
      auto Begin = CurAST->begin();
      assert(Begin != CurAST->end() && "must contain FI");
      if (std::next(Begin) != CurAST->end())
        // constant memory for instance, TODO: handle better
        return false;
      auto *UniqueI = Begin->getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      (void)FI; // suppress unused variable warning
      assert(UniqueI == FI && "AS must contain FI");
      return true;
    } else // MSSAU
      return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
  } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
    if (!SI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic store!

    // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop.  For those cases, we fallback to
    // load store promotion instead.  TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
    if (CurAST) {
      auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));

      if (AS.isRef() || !AS.isMustAlias())
        // Quick exit test, handled by the full path below as well.
        return false;
      auto *UniqueI = AS.getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      assert(UniqueI == SI && "AS must contain SI");
      return true;
    } else { // MSSAU
      if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
        return true;
      // If there are more accesses than the Promotion cap, give up, we're not
      // walking a list that long.
      if (Flags->NoOfMemAccTooLarge)
        return false;
      // Check store only if there's still "quota" to check clobber.
      if (Flags->LicmMssaOptCounter >= Flags->LicmMssaOptCap)
        return false;
      // If there are interfering Uses (i.e. their defining access is in the
      // loop), or ordered loads (stored as Defs!), don't move this store.
      // Could do better here, but this is conservatively correct.
      // TODO: Cache set of Uses on the first walk in runOnLoop, update when
      // moving accesses. Can also extend to dominating uses.
      auto *SIMD = MSSA->getMemoryAccess(SI);
      for (auto *BB : CurLoop->getBlocks())
        if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
          for (const auto &MA : *Accesses)
            if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
              auto *MD = MU->getDefiningAccess();
              if (!MSSA->isLiveOnEntryDef(MD) &&
                  CurLoop->contains(MD->getBlock()))
                return false;
              // Disable hoisting past potentially interfering loads. Optimized
              // Uses may point to an access outside the loop, as getClobbering
              // checks the previous iteration when walking the backedge.
              // FIXME: More precise: no Uses that alias SI.
              if (!Flags->IsSink && !MSSA->dominates(SIMD, MU))
                return false;
            } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
              if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
                (void)LI; // Silence warning.
                assert(!LI->isUnordered() && "Expected ordered load");
                return false;
              }
              // Any call, while it may not be clobbering SI, may be a use.
              if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
                // Check if the call may read from the memory location written
                // to by SI.  Check CI's attributes and arguments; the number of
                // such checks performed is limited above by NoOfMemAccTooLarge.
                ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
                if (isModOrRefSet(MRI))
                  return false;
              }
            }
        }

      auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
      Flags->LicmMssaOptCounter++;
      // If there are no clobbering Defs in the loop, store is safe to hoist.
      return MSSA->isLiveOnEntryDef(Source) ||
             !CurLoop->contains(Source->getBlock());
    }
  }
  assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");

  // We've established mechanical ability and aliasing, it's up to the caller
  // to check fault safety
  return true;
}
/// Returns true if a PHINode is trivially replaceable with an Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
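///
/// For example, a sketch (names illustrative): with %inst defined in the loop,
///   exit:
///     %inst.lcssa = phi i32 [ %inst, %loop.latch ]
/// is trivially replaceable with %inst, since %inst is its only incoming
/// value.
///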
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
  for (const Value *IncValue : PN.incoming_values())
    if (IncValue != &I)
      return false;

  return true;
}
/// Return true if the instruction is free in the loop.
static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
                         const TargetTransformInfo *TTI) {

  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) !=
        TargetTransformInfo::TCC_Free)
      return false;
    // For a GEP, we cannot simply use getUserCost because currently it
    // optimistically assumes that a GEP will fold into addressing mode
    // regardless of its users.
    const BasicBlock *BB = GEP->getParent();
    for (const User *U : GEP->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (CurLoop->contains(UI) &&
          (BB != UI->getParent() ||
           (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
        return false;
    }
    return true;
  } else
    return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
           TargetTransformInfo::TCC_Free;
}
/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
/// We also return true if the instruction could be folded away in lowering.
/// (e.g.,  a GEP can be folded into a load as an addressing mode in the loop).
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop) {
  const auto &BlockColors = SafetyInfo->getBlockColors();
  bool IsFree = isFreeInLoop(I, CurLoop, TTI);
  for (const User *U : I.users()) {
    const Instruction *UI = cast<Instruction>(U);
    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      const BasicBlock *BB = PN->getParent();
      // We cannot sink uses in catchswitches.
      if (isa<CatchSwitchInst>(BB->getTerminator()))
        return false;

      // We need to sink a callsite to a unique funclet.  Avoid sinking if the
      // phi use is too muddled.
      if (isa<CallInst>(I))
        if (!BlockColors.empty() &&
            BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
          return false;
    }

    if (CurLoop->contains(UI)) {
      if (IsFree) {
        FreeInLoop = true;
        continue;
      }
      return false;
    }
  }
  return true;
}
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
  Instruction *New;
  if (auto *CI = dyn_cast<CallInst>(&I)) {
    const auto &BlockColors = SafetyInfo->getBlockColors();

    // Sinking call-sites need to be handled differently from other
    // instructions.  The cloned call-site needs a funclet bundle operand
    // appropriate for its location in the CFG.
    SmallVector<OperandBundleDef, 1> OpBundles;
    for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
         BundleIdx != BundleEnd; ++BundleIdx) {
      OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
      if (Bundle.getTagID() == LLVMContext::OB_funclet)
        continue;

      OpBundles.emplace_back(Bundle);
    }

    if (!BlockColors.empty()) {
      const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
      assert(CV.size() == 1 && "non-unique color for exit block!");
      BasicBlock *BBColor = CV.front();
      Instruction *EHPad = BBColor->getFirstNonPHI();
      if (EHPad->isEHPad())
        OpBundles.emplace_back("funclet", EHPad);
    }

    New = CallInst::Create(CI, OpBundles);
  } else {
    New = I.clone();
  }

  ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
  if (!I.getName().empty())
    New->setName(I.getName() + ".le");

  if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
    // Create a new MemoryAccess and let MemorySSA set its defining access.
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        New, nullptr, New->getParent(), MemorySSA::Beginning);
    if (NewMemAcc) {
      if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
        MSSAU->insertDef(MemDef, /*RenameUses=*/true);
      else {
        auto *MemUse = cast<MemoryUse>(NewMemAcc);
        MSSAU->insertUse(MemUse, /*RenameUses=*/true);
      }
    }
  }
  // Build LCSSA PHI nodes for any in-loop operands. Note that this is
  // particularly cheap because we can rip off the PHI node that we're
  // replacing for the number and blocks of the predecessors.
  // OPT: If this shows up in a profile, we can instead finish sinking all
  // invariant instructions, and then walk their operands to re-establish
  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
  // sinking bottom-up.
  for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE;
       ++OI)
    if (Instruction *OInst = dyn_cast<Instruction>(*OI))
      if (Loop *OLoop = LI->getLoopFor(OInst->getParent()))
        if (!OLoop->contains(&PN)) {
          PHINode *OpPN =
              PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
                              OInst->getName() + ".lcssa", &ExitBlock.front());
          for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
            OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
          *OI = OpPN;
        }
  return New;
}
static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU) {
  if (AST)
    AST->deleteValue(&I);
  if (MSSAU)
    MSSAU->removeMemoryAccess(&I);
  SafetyInfo.removeInstruction(&I);
  I.eraseFromParent();
}
static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU,
                                  ScalarEvolution *SE) {
  SafetyInfo.removeInstruction(&I);
  SafetyInfo.insertInstructionTo(&I, Dest.getParent());
  I.moveBefore(&Dest);
  if (MSSAU)
    if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
            MSSAU->getMemorySSA()->getMemoryAccess(&I)))
      MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
                         MemorySSA::BeforeTerminator);
  if (SE)
    SE->forgetValue(&I);
}
static Instruction *sinkThroughTriviallyReplaceablePHI(
    PHINode *TPN, Instruction *I, LoopInfo *LI,
    SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
    MemorySSAUpdater *MSSAU) {
  assert(isTriviallyReplaceablePHI(*TPN, *I) &&
         "Expect only trivially replaceable PHI");
  BasicBlock *ExitBlock = TPN->getParent();
  Instruction *New;
  auto It = SunkCopies.find(ExitBlock);
  if (It != SunkCopies.end())
    New = It->second;
  else
    New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
        *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
  return New;
}
static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
  BasicBlock *BB = PN->getParent();
  if (!BB->canSplitPredecessors())
    return false;
  // It's not impossible to split EHPad blocks, but if BlockColors already exist
  // it requires updating BlockColors for all offspring blocks accordingly. By
  // skipping such a corner case, we can make updating BlockColors after
  // splitting predecessors fairly simple.
  if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
    return false;
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
    BasicBlock *BBPred = *PI;
    if (isa<IndirectBrInst>(BBPred->getTerminator()) ||
        isa<CallBrInst>(BBPred->getTerminator()))
      return false;
  }
  return true;
}
static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
                                        LoopInfo *LI, const Loop *CurLoop,
                                        LoopSafetyInfo *SafetyInfo,
                                        MemorySSAUpdater *MSSAU) {
#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif
  BasicBlock *ExitBB = PN->getParent();
  assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");

  // Split predecessors of the loop exit so that instructions in the loop are
  // exposed to exit blocks through trivially replaceable PHIs while keeping the
  // loop in the canonical form where each predecessor of each exit block should
  // be contained within the loop. For example, this will convert the loop below
  // into
  //
  // LB1:
  //   %v1 =
  //   br %LE, %LB2
  // LB2:
  //   %v2 =
  //   br %LE, %LB1
  // LE:
  //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
  //
  // . . .
  //
  // LB1:
  //   %v1 =
  //   br %LE.split, %LB2
  // LB2:
  //   %v2 =
  //   br %LE.split2, %LB1
  // LE.split:
  //   %p1 = phi [%v1, %LB1] <-- trivially replaceable
  //   br %LE
  // LE.split2:
  //   %p2 = phi [%v2, %LB2] <-- trivially replaceable
  //   br %LE
  // LE:
  //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
  //
  const auto &BlockColors = SafetyInfo->getBlockColors();
  SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
  while (!PredBBs.empty()) {
    BasicBlock *PredBB = *PredBBs.begin();
    assert(CurLoop->contains(PredBB) &&
           "Expect all predecessors are in the loop");
    if (PN->getBasicBlockIndex(PredBB) >= 0) {
      BasicBlock *NewPred = SplitBlockPredecessors(
          ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
      // Since we do not allow splitting EH-block with BlockColors in
      // canSplitPredecessors(), we can simply assign predecessor's color to
      // the new block.
      if (!BlockColors.empty())
        // Grab a reference to the ColorVector to be inserted before getting the
        // reference to the vector we are copying because inserting the new
        // element in BlockColors might cause the map to be reallocated.
        SafetyInfo->copyColors(NewPred, PredBB);
    }
    PredBBs.remove(PredBB);
  }
}

/// When an instruction is found to only be used outside of the loop, this
/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE) {
  LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
           << "sinking " << ore::NV("Inst", &I);
  });
  bool Changed = false;
  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumSunk;

  // Iterate over users to be ready for actual sinking. Replace users via
  // unreachable blocks with undef and make all user PHIs trivially replaceable.
  SmallPtrSet<Instruction *, 8> VisitedUsers;
  for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
    auto *User = cast<Instruction>(*UI);
    Use &U = UI.getUse();
    ++UI;

    if (VisitedUsers.count(User) || CurLoop->contains(User))
      continue;

    if (!DT->isReachableFromEntry(User->getParent())) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    // The user must be a PHI node.
    PHINode *PN = cast<PHINode>(User);

    // Surprisingly, instructions can be used outside of loops without any
    // exits. This can only happen in PHI nodes if the incoming block is
    // unreachable.
    BasicBlock *BB = PN->getIncomingBlock(U);
    if (!DT->isReachableFromEntry(BB)) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    VisitedUsers.insert(PN);
    if (isTriviallyReplaceablePHI(*PN, I))
      continue;

    if (!canSplitPredecessors(PN, SafetyInfo))
      return Changed;

    // Split predecessors of the PHI so that we can make users trivially
    // replaceable.
    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);

    // Should rebuild the iterators, as they may be invalidated by
    // splitPredecessorsOfLoopExit().
    UI = I.user_begin();
    UE = I.user_end();
  }

  if (VisitedUsers.empty())
    return Changed;

#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif

  // Clones of this instruction. Don't create more than one per exit block!
  SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;

  // If this instruction is only used outside of the loop, then all users are
  // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
  // the instruction.
  SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
  for (auto *UI : Users) {
    auto *User = cast<Instruction>(UI);

    if (CurLoop->contains(User))
      continue;

    PHINode *PN = cast<PHINode>(User);
    assert(ExitBlockSet.count(PN->getParent()) &&
           "The LCSSA PHI is not in an exit block!");
    // The PHI must be trivially replaceable.
    Instruction *New = sinkThroughTriviallyReplaceablePHI(
        PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
    PN->replaceAllUsesWith(New);
    eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr);
    Changed = true;
  }
  return Changed;
}
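
// As a sketch of the overall effect (illustrative IR, not from a real test
// case), sinking turns:
//
//   Loop:
//     %inv = mul i32 %a, %b           ; only used by the exit PHI
//     br i1 %c, label %Exit, label %Loop
//   Exit:
//     %lcssa = phi i32 [ %inv, %Loop ]
//
// into a clone of %inv placed in %Exit, with %lcssa erased, so the multiply
// no longer executes on every iteration.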

/// When an instruction is found to only use loop-invariant operands and is
/// safe to hoist, this function is called to do the dirty work.
///
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE) {
  LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getName() << ": " << I
                    << "\n");
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
                                                         << ore::NV("Inst", &I);
  });

  // Metadata can be dependent on conditions we are hoisting above.
  // Conservatively strip all metadata on the instruction unless we were
  // guaranteed to execute I if we entered the loop, in which case the metadata
  // is valid in the loop preheader.
  if (I.hasMetadataOtherThanDebugLoc() &&
      // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
      // time in isGuaranteedToExecute if we don't actually have anything to
      // drop. It is a compile-time optimization, not required for correctness.
      !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
    I.dropUnknownNonDebugMetadata();
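
  // For example (illustrative), a conditionally executed load annotated with
  // !range or !nonnull may only carry that metadata because of the guarding
  // branch:
  //
  //   for.body:
  //     br i1 %c, label %then, label %latch
  //   then:
  //     %v = load i32, i32* %p, !range !0   ; !range justified only under %c
  //
  // Hoisting %v into the preheader without dropping !range would assert a
  // fact on paths where the guard never held.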

  if (isa<PHINode>(I))
    // Move the new node to the end of the phi list in the destination block.
    moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
  else
    // Move the new node to the destination block, before its terminator.
    moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);

  // Apply line 0 debug locations when we are moving instructions to different
  // basic blocks because we want to avoid jumpy line tables.
  if (const DebugLoc &DL = I.getDebugLoc())
    I.setDebugLoc(DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));

  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumHoisted;
}

/// Only sink or hoist an instruction if it is not a trapping instruction,
/// or if the instruction is known not to trap when moved to the preheader,
/// or if it is a trapping instruction and is guaranteed to execute.
static bool isSafeToExecuteUnconditionally(Instruction &Inst,
                                           const DominatorTree *DT,
                                           const Loop *CurLoop,
                                           const LoopSafetyInfo *SafetyInfo,
                                           OptimizationRemarkEmitter *ORE,
                                           const Instruction *CtxI) {
  if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT))
    return true;

  bool GuaranteedToExecute =
      SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);

  if (!GuaranteedToExecute) {
    auto *LI = dyn_cast<LoadInst>(&Inst);
    if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
               << "failed to hoist load with loop-invariant address "
                  "because load is conditionally executed";
      });
  }

  return GuaranteedToExecute;
}
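
// For example (illustrative IR), in:
//
//   for.body:
//     br i1 %c, label %div, label %latch
//   div:
//     %q = sdiv i32 %a, %b     ; may trap if %b == 0
//
// %q is neither speculatable nor guaranteed to execute, so it must stay put;
// an unconditional load from a known-dereferenceable pointer would pass.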

namespace {
class LoopPromoter : public LoadAndStorePromoter {
  Value *SomePtr; // Designated pointer to store to.
  const SmallSetVector<Value *, 8> &PointerMustAliases;
  SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
  SmallVectorImpl<Instruction *> &LoopInsertPts;
  SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
  PredIteratorCache &PredCache;
  AliasSetTracker &AST;
  MemorySSAUpdater *MSSAU;
  LoopInfo &LI;
  DebugLoc DL;
  int Alignment;
  bool UnorderedAtomic;
  AAMDNodes AATags;
  ICFLoopSafetyInfo &SafetyInfo;

  Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
    if (Instruction *I = dyn_cast<Instruction>(V))
      if (Loop *L = LI.getLoopFor(I->getParent()))
        if (!L->contains(BB)) {
          // We need to create an LCSSA PHI node for the incoming value and
          // store that.
          PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
                                        I->getName() + ".lcssa", &BB->front());
          for (BasicBlock *Pred : PredCache.get(BB))
            PN->addIncoming(I, Pred);
          return PN;
        }
    return V;
  }

public:
  LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
               const SmallSetVector<Value *, 8> &PMA,
               SmallVectorImpl<BasicBlock *> &LEB,
               SmallVectorImpl<Instruction *> &LIP,
               SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
               AliasSetTracker &ast, MemorySSAUpdater *MSSAU, LoopInfo &li,
               DebugLoc dl, int alignment, bool UnorderedAtomic,
               const AAMDNodes &AATags, ICFLoopSafetyInfo &SafetyInfo)
      : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
        LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
        PredCache(PIC), AST(ast), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
        Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
        SafetyInfo(SafetyInfo) {}

  bool isInstInList(Instruction *I,
                    const SmallVectorImpl<Instruction *> &) const override {
    Value *Ptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      Ptr = LI->getOperand(0);
    else
      Ptr = cast<StoreInst>(I)->getPointerOperand();
    return PointerMustAliases.count(Ptr);
  }

  void doExtraRewritesBeforeFinalDeletion() override {
    // Insert stores in the loop exit blocks. Each exit block gets a
    // store of the live-out values that feed them. Since we've already told
    // the SSA updater about the defs in the loop and the preheader
    // definition, it is all set and we can start using it.
    for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
      BasicBlock *ExitBlock = LoopExitBlocks[i];
      Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
      LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
      Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
      Instruction *InsertPos = LoopInsertPts[i];
      StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
      if (UnorderedAtomic)
        NewSI->setOrdering(AtomicOrdering::Unordered);
      NewSI->setAlignment(Align(Alignment));
      NewSI->setDebugLoc(DL);
      if (AATags)
        NewSI->setAAMetadata(AATags);

      if (MSSAU) {
        MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
        MemoryAccess *NewMemAcc;
        if (!MSSAInsertPoint) {
          NewMemAcc = MSSAU->createMemoryAccessInBB(
              NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
        } else {
          NewMemAcc =
              MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
        }
        MSSAInsertPts[i] = NewMemAcc;
        MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
        // FIXME: true for safety, false may still be correct.
      }
    }
  }

  void replaceLoadWithValue(LoadInst *LI, Value *V) const override {
    // Update alias analysis.
    AST.copyValue(LI, V);
  }
  void instructionDeleted(Instruction *I) const override {
    SafetyInfo.removeInstruction(I);
    AST.deleteValue(I);
    if (MSSAU)
      MSSAU->removeMemoryAccess(I);
  }
};
} // namespace

/// Return true iff we can prove that a caller of this function can not inspect
/// the contents of the provided object in a well-defined program.
bool isKnownNonEscaping(Value *Object, const TargetLibraryInfo *TLI) {
  if (isa<AllocaInst>(Object))
    // Since the alloca goes out of scope, we know the caller can't retain a
    // reference to it and be well defined. Thus, we don't need to check for
    // capture.
    return true;

  // For all other objects we need to know that the caller can't possibly
  // have gotten a reference to the object. There are two components of
  // that:
  //   1) Object can't be escaped by this function. This is what
  //      PointerMayBeCaptured checks.
  //   2) Object can't have been captured at definition site. For this, we
  //      need to know the return value is noalias. At the moment, we use a
  //      weaker condition and handle only AllocLikeFunctions (which are
  //      known to be noalias). TODO
  return isAllocLikeFn(Object, TLI) &&
         !PointerMayBeCaptured(Object, true, true);
}
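
// For example (illustrative): a malloc'd buffer whose pointer is never stored
// or passed out satisfies both conditions, whereas
//
//   char *P = malloc(N);
//   Global = P;            // captured: the caller may load through Global
//
// fails the PointerMayBeCaptured check and so is not known non-escaping.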

/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop. We do this by looping over
/// the stores in the loop, looking for stores to Must pointers which are
/// loop invariant.
///
bool llvm::promoteLoopAccessesToScalars(
    const SmallSetVector<Value *, 8> &PointerMustAliases,
    SmallVectorImpl<BasicBlock *> &ExitBlocks,
    SmallVectorImpl<Instruction *> &InsertPts,
    SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
    LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
    Loop *CurLoop, AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
    ICFLoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) {
  // Verify inputs.
  assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
         CurAST != nullptr && SafetyInfo != nullptr &&
         "Unexpected Input to promoteLoopAccessesToScalars");

  Value *SomePtr = *PointerMustAliases.begin();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  // It is not safe to promote a load/store from the loop if the load/store is
  // conditional. For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp += 1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // The safety property divides into two parts:
  // p1) The memory may not be dereferenceable on entry to the loop. In this
  //     case, we can't insert the required load in the preheader.
  // p2) The memory model does not allow us to insert a store along any dynamic
  //     path which did not originally have one.
  //
  // If at least one store is guaranteed to execute, both properties are
  // satisfied, and promotion is legal.
  //
  // This, however, is not a necessary condition. Even if no store/load is
  // guaranteed to execute, we can still establish these properties.
  // We can establish (p1) by proving that hoisting the load into the preheader
  // is safe (i.e. proving dereferenceability on all paths through the loop).
  // We can use any access within the alias set to prove dereferenceability,
  // since they're all must alias.
  //
  // There are two ways to establish (p2):
  // a) Prove the location is thread-local. In this case the memory model
  //    requirement does not apply, and stores are safe to insert.
  // b) Prove a store dominates every exit block. In this case, if an exit
  //    block is reached, the original dynamic path would have taken us through
  //    the store, so inserting a store into the exit block is safe. Note that
  //    this is different from the store being guaranteed to execute. For
  //    instance, if an exception is thrown on the first iteration of the loop,
  //    the original store is never executed, but the exit blocks are not
  //    executed either.
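
  // As a sketch of the transformation when it is legal (illustrative C-like
  // pseudocode, assuming a store to *P is guaranteed to execute):
  //
  //    for () { *P += 1; }
  //  ==>
  //    tmp = *P;  for () { tmp += 1; }  *P = tmp;
  //
  // i.e. the load is hoisted into the preheader, the running value lives in a
  // register inside the loop, and a single store is sunk to each exit block.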

  bool DereferenceableInPH = false;
  bool SafeToInsertStore = false;

  SmallVector<Instruction *, 64> LoopUses;

  // We start with an alignment of one and try to find instructions that allow
  // us to prove better alignment.
  Align Alignment;
  // Keep track of which types of access we see.
  bool SawUnorderedAtomic = false;
  bool SawNotAtomic = false;
  AAMDNodes AATags;

  const DataLayout &MDL = Preheader->getModule()->getDataLayout();

  bool IsKnownThreadLocalObject = false;
  if (SafetyInfo->anyBlockMayThrow()) {
    // If a loop can throw, we have to insert a store along each unwind edge.
    // That said, we can't actually make the unwind edge explicit. Therefore,
    // we have to prove that the store is dead along the unwind edge. We do
    // this by proving that the caller can't have a reference to the object
    // after return and thus can't possibly load from the object.
    Value *Object = GetUnderlyingObject(SomePtr, MDL);
    if (!isKnownNonEscaping(Object, TLI))
      return false;
    // Subtlety: Alloca's aren't visible to callers, but *are* potentially
    // visible to other threads if captured and used during their lifetimes.
    IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
  }

  // Check that all of the pointers in the alias set have the same type. We
  // cannot (yet) promote a memory location that is loaded and stored in
  // different sizes. While we are at it, collect alignment and AA info.
  for (Value *ASIV : PointerMustAliases) {
    // Check that all of the pointers in the alias set have the same type. We
    // cannot (yet) promote a memory location that is loaded and stored in
    // different sizes.
    if (SomePtr->getType() != ASIV->getType())
      return false;

    for (User *U : ASIV->users()) {
      // Ignore instructions that are outside the loop.
      Instruction *UI = dyn_cast<Instruction>(U);
      if (!UI || !CurLoop->contains(UI))
        continue;

      // If there is a non-load/store instruction in the loop, we can't promote
      // it.
      if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
        if (!Load->isUnordered())
          return false;

        SawUnorderedAtomic |= Load->isAtomic();
        SawNotAtomic |= !Load->isAtomic();

        Align InstAlignment = Load->getAlign();

        // Note that proving a load safe to speculate requires proving
        // sufficient alignment at the target location. Proving it guaranteed
        // to execute does as well. Thus we can increase our guaranteed
        // alignment as well.
        if (!DereferenceableInPH || (InstAlignment > Alignment))
          if (isSafeToExecuteUnconditionally(*Load, DT, CurLoop, SafetyInfo,
                                             ORE, Preheader->getTerminator())) {
            DereferenceableInPH = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
      } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
        // Stores *of* the pointer are not interesting, only stores *to* the
        // pointer.
        if (UI->getOperand(1) != ASIV)
          continue;
        if (!Store->isUnordered())
          return false;

        SawUnorderedAtomic |= Store->isAtomic();
        SawNotAtomic |= !Store->isAtomic();

        // If the store is guaranteed to execute, both properties are satisfied.
        // We may want to check if a store is guaranteed to execute even if we
        // already know that promotion is safe, since it may have higher
        // alignment than any other guaranteed store, in which case we can
        // raise the alignment on the promoted store.
        Align InstAlignment = Store->getAlign();

        if (!DereferenceableInPH || !SafeToInsertStore ||
            (InstAlignment > Alignment)) {
          if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
            DereferenceableInPH = true;
            SafeToInsertStore = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
        }

        // If a store dominates all exit blocks, it is safe to sink.
        // As explained above, if an exit block was executed, a dominating
        // store must have been executed at least once, so we are not
        // introducing stores on paths that did not have them.
        // Note that this only looks at explicit exit blocks. If we ever
        // start sinking stores into unwind edges (see above), this will break.
        if (!SafeToInsertStore)
          SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
            return DT->dominates(Store->getParent(), Exit);
          });

        // If the store is not guaranteed to execute, we may still get
        // deref info through it.
        if (!DereferenceableInPH) {
          DereferenceableInPH = isDereferenceableAndAlignedPointer(
              Store->getPointerOperand(), Store->getValueOperand()->getType(),
              Store->getAlign(), MDL, Preheader->getTerminator(), DT);
        }
      } else
        return false; // Not a load or store.

      // Merge the AA tags.
      if (LoopUses.empty()) {
        // On the first load/store, just take its AA tags.
        UI->getAAMetadata(AATags);
      } else if (AATags) {
        UI->getAAMetadata(AATags, /* Merge = */ true);
      }

      LoopUses.push_back(UI);
    }
  }

  // If we found both an unordered atomic instruction and a non-atomic memory
  // access, bail. We can't blindly promote non-atomic to atomic since we
  // might not be able to lower the result. We can't downgrade since that
  // would violate the memory model. Also, align 0 is an error for atomics.
  if (SawUnorderedAtomic && SawNotAtomic)
    return false;

  // If we're inserting an atomic load in the preheader, we must be able to
  // lower it. We're only guaranteed to be able to lower naturally aligned
  // atomics.
  auto *SomePtrElemType = SomePtr->getType()->getPointerElementType();
  if (SawUnorderedAtomic &&
      Alignment < MDL.getTypeStoreSize(SomePtrElemType))
    return false;

  // If we couldn't prove we can hoist the load, bail.
  if (!DereferenceableInPH)
    return false;

  // We know we can hoist the load, but don't have a guaranteed store.
  // Check whether the location is thread-local. If it is, then we can insert
  // stores along paths which originally didn't have them without violating the
  // memory model.
  if (!SafeToInsertStore) {
    if (IsKnownThreadLocalObject)
      SafeToInsertStore = true;
    else {
      Value *Object = GetUnderlyingObject(SomePtr, MDL);
      SafeToInsertStore =
          (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
          !PointerMayBeCaptured(Object, true, true);
    }
  }

  // If we've still failed to prove we can sink the store, give up.
  if (!SafeToInsertStore)
    return false;

  // Otherwise, this is safe to promote, let's do it!
  LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
                    << '\n');
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
                              LoopUses[0])
           << "Moving accesses to memory location out of the loop";
  });
  ++NumPromoted;

  // Look at all the loop uses, and try to merge their locations.
  std::vector<const DILocation *> LoopUsesLocs;
  for (auto U : LoopUses)
    LoopUsesLocs.push_back(U->getDebugLoc().get());
  auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode *, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
                        InsertPts, MSSAInsertPts, PIC, *CurAST, MSSAU, *LI, DL,
                        Alignment.value(), SawUnorderedAtomic, AATags,
                        *SafetyInfo);

  // Set up the preheader to have a definition of the value. It is the live-out
  // value from the preheader that uses in the loop will use.
  LoadInst *PreheaderLoad = new LoadInst(
      SomePtr->getType()->getPointerElementType(), SomePtr,
      SomePtr->getName() + ".promoted", Preheader->getTerminator());
  if (SawUnorderedAtomic)
    PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
  PreheaderLoad->setAlignment(Alignment);
  PreheaderLoad->setDebugLoc(DebugLoc());
  if (AATags)
    PreheaderLoad->setAAMetadata(AATags);
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  if (MSSAU) {
    MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
        PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
    MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
    MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);
  }

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  // Rewrite all the loads in the loop and remember all the definitions from
  // stores in the loop.
  Promoter.run(LoopUses);

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
  if (PreheaderLoad->use_empty())
    eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, MSSAU);

  return true;
}

/// Returns an owning pointer to an alias set which incorporates aliasing info
/// from L and all subloops of L.
std::unique_ptr<AliasSetTracker>
LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI,
                                                 AAResults *AA) {
  auto CurAST = std::make_unique<AliasSetTracker>(*AA);

  // Add everything from all the sub loops.
  for (Loop *InnerL : L->getSubLoops())
    for (BasicBlock *BB : InnerL->blocks())
      CurAST->add(*BB);

  // And merge in this loop (without anything from inner loops).
  for (BasicBlock *BB : L->blocks())
    if (LI->getLoopFor(BB) == L)
      CurAST->add(*BB);

  return CurAST;
}

std::unique_ptr<AliasSetTracker>
LoopInvariantCodeMotion::collectAliasInfoForLoopWithMSSA(
    Loop *L, AAResults *AA, MemorySSAUpdater *MSSAU) {
  auto *MSSA = MSSAU->getMemorySSA();
  auto CurAST = std::make_unique<AliasSetTracker>(*AA, MSSA, L);
  CurAST->addAllInstructionsInLoopUsingMSSA();
  return CurAST;
}

static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AAResults *AA) {
  // First check to see if any of the basic blocks in CurLoop invalidate *V.
  bool isInvalidatedAccordingToAST = CurAST->getAliasSetFor(MemLoc).isMod();

  if (!isInvalidatedAccordingToAST || !LICMN2Theshold)
    return isInvalidatedAccordingToAST;

  // Check with a diagnostic analysis if we can refine the information above.
  // This is to identify the limitations of using the AST.
  // The alias set mechanism used by LICM has a major weakness in that it
  // combines all things which may alias into a single set *before* asking
  // modref questions. As a result, a single readonly call within a loop will
  // collapse all loads and stores into a single alias set and report
  // invalidation if the loop contains any store. For example, readonly calls
  // with deopt states have this form and create a general alias set with all
  // loads and stores. In order to get any LICM in loops containing possible
  // deopt states we need a more precise invalidation of checking the mod ref
  // info of each instruction within the loop and LI. This has a complexity of
  // O(N^2), so currently, it is used only as a diagnostic tool since the
  // default value of LICMN2Theshold is zero.

  // Don't look at nested loops.
  if (CurLoop->begin() != CurLoop->end())
    return true;

  int N = 0;
  for (BasicBlock *BB : CurLoop->getBlocks())
    for (Instruction &I : *BB) {
      if (N >= LICMN2Theshold) {
        LLVM_DEBUG(dbgs() << "Aliasing N2 threshold exhausted for "
                          << *(MemLoc.Ptr) << "\n");
        return true;
      }
      N++;
      auto Res = AA->getModRefInfo(&I, MemLoc);
      if (isModSet(Res)) {
        LLVM_DEBUG(dbgs() << "Aliasing failed on " << I << " for "
                          << *(MemLoc.Ptr) << "\n");
        return true;
      }
    }
  LLVM_DEBUG(dbgs() << "Aliasing okay for " << *(MemLoc.Ptr) << "\n");
  return false;
}
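
// To illustrate the AST weakness described above (sketch, hypothetical IR):
//
//   for.body:
//     call void @readonly_fn(i32* %a) readonly
//     %v = load i32, i32* %b
//     store i32 %v, i32* %c
//
// the readonly call, %a, %b, and %c may all land in one alias set, so the
// store marks the whole set as Mod and blocks hoisting of %v even when %b
// and %c never alias. The O(N^2) per-instruction modref scan above refines
// this, at the cost quantified by LICMN2Theshold.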

static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                             Loop *CurLoop,
                                             SinkAndHoistLICMFlags &Flags) {
  // For hoisting, use the walker to determine safety.
  if (!Flags.IsSink) {
    MemoryAccess *Source;
    // See declaration of SetLicmMssaOptCap for usage details.
    if (Flags.LicmMssaOptCounter >= Flags.LicmMssaOptCap)
      Source = MU->getDefiningAccess();
    else {
      Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
      Flags.LicmMssaOptCounter++;
    }
    return !MSSA->isLiveOnEntryDef(Source) &&
           CurLoop->contains(Source->getBlock());
  }

  // For sinking, we'd need to check all Defs below this use. The getClobbering
  // call will look on the backedge of the loop, but will check aliasing with
  // the instructions on the previous iteration.
  // Consider:
  //   for (i ... )
  //     load a[i] ( Use (LoE)
  //     store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
  //     i++;
  // The load sees no clobbering inside the loop, as the backedge alias check
  // does phi translation, and will check aliasing against store a[i-1].
  // However, sinking the load outside the loop, below the store, is incorrect.

  // For now, only sink if there are no Defs in the loop, and the existing ones
  // precede the use and are in the same block.
  // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
  // needs PostDominatorTreeAnalysis.
  // FIXME: More precise: no Defs that alias this Use.
  if (Flags.NoOfMemAccTooLarge)
    return true;
  for (auto *BB : CurLoop->getBlocks())
    if (auto *Accesses = MSSA->getBlockDefs(BB))
      for (const auto &MA : *Accesses)
        if (const auto *MD = dyn_cast<MemoryDef>(&MA))
          if (MU->getBlock() != MD->getBlock() ||
              !MSSA->locallyDominates(MD, MU))
            return true;
  return false;
}

/// Little predicate that returns true if the specified basic block is in
/// a subloop of the current one, not the current one itself.
///
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
  assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
  return LI->getLoopFor(BB) != CurLoop;
}