//===- GVNHoist.cpp - Hoist scalar and load expressions ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass hoists expressions from branches to a common dominator. It uses
// GVN (global value numbering) to discover expressions computing the same
// values. The primary goal is to reduce the code size, and in some cases
// reduce the critical path (by exposing more ILP).
//
// Hoisting may affect performance in some cases. To mitigate that, hoisting
// is disabled in the following cases:
// 1. Scalars across calls.
// 2. geps when the corresponding load/store cannot be hoisted.
//===----------------------------------------------------------------------===//
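// As an illustrative sketch (the block and value names below are invented,
// not taken from a real test case), consider two branches that compute the
// same value:
//
//   if.then:
//     %x = add i32 %a, %b
//   if.else:
//     %y = add i32 %a, %b
//
// GVN gives both adds the same value number, and the pass hoists a single
// add into the common dominator of if.then and if.else, then rewrites all
// uses of %x and %y to the hoisted value.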
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils/MemorySSA.h"

using namespace llvm;

#define DEBUG_TYPE "gvn-hoist"
STATISTIC(NumHoisted, "Number of instructions hoisted");
STATISTIC(NumRemoved, "Number of instructions removed");
STATISTIC(NumLoadsHoisted, "Number of loads hoisted");
STATISTIC(NumLoadsRemoved, "Number of loads removed");
STATISTIC(NumStoresHoisted, "Number of stores hoisted");
STATISTIC(NumStoresRemoved, "Number of stores removed");
STATISTIC(NumCallsHoisted, "Number of calls hoisted");
STATISTIC(NumCallsRemoved, "Number of calls removed");
static cl::opt<int>
    MaxHoistedThreshold("gvn-max-hoisted", cl::Hidden, cl::init(-1),
                        cl::desc("Max number of instructions to hoist "
                                 "(default unlimited = -1)"));
static cl::opt<int> MaxNumberOfBBSInPath(
    "gvn-hoist-max-bbs", cl::Hidden, cl::init(4),
    cl::desc("Max number of basic blocks on the path between "
             "hoisting locations (default = 4, unlimited = -1)"));
namespace {

// Provides a sorting function based on the execution order of two
// instructions.
struct SortByDFSIn {
private:
  DenseMap<const BasicBlock *, unsigned> &DFSNumber;

public:
  SortByDFSIn(DenseMap<const BasicBlock *, unsigned> &D) : DFSNumber(D) {}

  // Returns true when A executes before B.
  bool operator()(const Instruction *A, const Instruction *B) const {
    // FIXME: libc++ has a std::sort() algorithm that will call the compare
    // function on the same element. Once PR20837 is fixed and some more years
    // pass by and all the buildbots have moved to a corrected std::sort(),
    // enable the following assert:
    //
    // assert(A != B);

    const BasicBlock *BA = A->getParent();
    const BasicBlock *BB = B->getParent();
    unsigned NA = DFSNumber[BA];
    unsigned NB = DFSNumber[BB];
    if (NA != NB)
      return NA < NB;

    // A and B are in the same basic block: sort them in the order they occur
    // in it. Block iterators are not random access, so std::distance cannot
    // be used here; instead, scan forward from the instruction after A and
    // return true when B is found, i.e., when A executes first. Starting the
    // scan after A also makes the comparator return false when A == B, as a
    // strict weak ordering requires.
    for (auto AI = std::next(BasicBlock::const_iterator(A)), AE = BA->end();
         AI != AE; ++AI)
      if (&*AI == B)
        return true;
    return false;
  }
};
// A map from a pair of VNs to all the instructions with those VNs.
typedef DenseMap<std::pair<unsigned, unsigned>, SmallVector<Instruction *, 4>>
    VNtoInsns;

// An invalid value number used when inserting a single value number into
// VNtoInsns.
enum : unsigned { InvalidVN = ~2U };
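// For example, a scalar with value number 5 is keyed as {5, InvalidVN}, a
// load is keyed by the VN of its pointer operand, and a store with pointer
// VN 5 and stored-value VN 7 is keyed as {5, 7} (the numbers are invented
// for illustration; see the *Info classes below).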
// Records all scalar instructions that are candidates for code hoisting.
class InsnInfo {
  VNtoInsns VNtoScalars;

public:
  // Inserts I and its value number in VNtoScalars.
  void insert(Instruction *I, GVN::ValueTable &VN) {
    // Scalar instruction.
    unsigned V = VN.lookupOrAdd(I);
    VNtoScalars[{V, InvalidVN}].push_back(I);
  }

  const VNtoInsns &getVNTable() const { return VNtoScalars; }
};
// Records all load instructions that are candidates for code hoisting.
class LoadInfo {
  VNtoInsns VNtoLoads;

public:
  // Insert Load and the value number of its memory address in VNtoLoads.
  void insert(LoadInst *Load, GVN::ValueTable &VN) {
    if (Load->isSimple()) {
      unsigned V = VN.lookupOrAdd(Load->getPointerOperand());
      VNtoLoads[{V, InvalidVN}].push_back(Load);
    }
  }

  const VNtoInsns &getVNTable() const { return VNtoLoads; }
};
// Records all store instructions that are candidates for code hoisting.
class StoreInfo {
  VNtoInsns VNtoStores;

public:
  // Insert the Store and a hash number of the store address and the stored
  // value in VNtoStores.
  void insert(StoreInst *Store, GVN::ValueTable &VN) {
    if (!Store->isSimple())
      return;
    // Hash the store address and the stored value.
    Value *Ptr = Store->getPointerOperand();
    Value *Val = Store->getValueOperand();
    VNtoStores[{VN.lookupOrAdd(Ptr), VN.lookupOrAdd(Val)}].push_back(Store);
  }

  const VNtoInsns &getVNTable() const { return VNtoStores; }
};
// Records all call instructions that are candidates for code hoisting.
class CallInfo {
  VNtoInsns VNtoCallsScalars;
  VNtoInsns VNtoCallsLoads;
  VNtoInsns VNtoCallsStores;

public:
  // Insert Call and its value numbering in one of the VNtoCalls* containers.
  void insert(CallInst *Call, GVN::ValueTable &VN) {
    // A call that doesNotAccessMemory is handled as a Scalar, a call that
    // onlyReadsMemory is handled as a Load, and all other calls are handled
    // as stores.
    unsigned V = VN.lookupOrAdd(Call);
    auto Entry = std::make_pair(V, InvalidVN);

    if (Call->doesNotAccessMemory())
      VNtoCallsScalars[Entry].push_back(Call);
    else if (Call->onlyReadsMemory())
      VNtoCallsLoads[Entry].push_back(Call);
    else
      VNtoCallsStores[Entry].push_back(Call);
  }
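  // For instance, a call to a readnone function (e.g. @llvm.sqrt.f64) is
  // recorded as a scalar, a call to a readonly function (e.g. a @strlen
  // declared readonly) as a load, and a call that may write memory (e.g.
  // @memcpy) as a store; the example callees are illustrative only.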
  const VNtoInsns &getScalarVNTable() const { return VNtoCallsScalars; }

  const VNtoInsns &getLoadVNTable() const { return VNtoCallsLoads; }

  const VNtoInsns &getStoreVNTable() const { return VNtoCallsStores; }
};
typedef DenseMap<const BasicBlock *, bool> BBSideEffectsSet;
typedef SmallVector<Instruction *, 4> SmallVecInsn;
typedef SmallVectorImpl<Instruction *> SmallVecImplInsn;
// This pass hoists common computations across branches sharing a common
// dominator. The primary goal is to reduce the code size, and in some cases
// reduce the critical path (by exposing more ILP).
class GVNHoist {
public:
  GVN::ValueTable VN;
  DominatorTree *DT;
  AliasAnalysis *AA;
  MemoryDependenceResults *MD;
  const bool OptForMinSize;
  DenseMap<const BasicBlock *, unsigned> DFSNumber;
  BBSideEffectsSet BBSideEffects;
  MemorySSA *MSSA;
  int HoistedCtr;

  enum InsKind { Unknown, Scalar, Load, Store };

  GVNHoist(DominatorTree *Dt, AliasAnalysis *Aa, MemoryDependenceResults *Md,
           bool OptForMinSize)
      : DT(Dt), AA(Aa), MD(Md), OptForMinSize(OptForMinSize), HoistedCtr(0) {}
  // Return true when there is exception handling in BB.
  bool hasEH(const BasicBlock *BB) {
    auto It = BBSideEffects.find(BB);
    if (It != BBSideEffects.end())
      return It->second;

    if (BB->isEHPad() || BB->hasAddressTaken()) {
      BBSideEffects[BB] = true;
      return true;
    }

    if (BB->getTerminator()->mayThrow()) {
      BBSideEffects[BB] = true;
      return true;
    }

    BBSideEffects[BB] = false;
    return false;
  }
  // Return true when all paths from A to the end of the function pass through
  // both B and C.
  bool hoistingFromAllPaths(const BasicBlock *A, const BasicBlock *B,
                            const BasicBlock *C) {
    // We fill the WL with B and C in order to be able to remove items from it.
    SmallPtrSet<const BasicBlock *, 2> WL;
    WL.insert(B);
    WL.insert(C);

    for (auto It = df_begin(A), E = df_end(A); It != E;) {
      // There exists a path from A to the exit of the function when we are
      // still iterating in DF traversal and we have removed all the blocks
      // from the work list.
      if (WL.empty())
        return false;

      const BasicBlock *BB = *It;
      if (WL.erase(BB)) {
        // Stop DFS traversal when BB is in the work list.
        It.skipChildren();
        continue;
      }

      // Check for end of function, calls that do not return, etc.
      if (!isGuaranteedToTransferExecutionToSuccessor(BB->getTerminator()))
        return false;

      // Increment DFS traversal when not skipping children.
      ++It;
    }

    return true;
  }
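  // As an illustration, in the diamond CFG below, every path from A to the
  // exit of the function passes through either B or C, so
  // hoistingFromAllPaths(A, B, C) returns true:
  //
  //        A
  //       / \
  //      B   C
  //       \ /
  //        D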
  // Return true when I1 appears before I2 in the instructions of BB.
  bool firstInBB(BasicBlock *BB, const Instruction *I1,
                 const Instruction *I2) {
    for (Instruction &I : *BB) {
      if (&I == I1)
        return true;
      if (&I == I2)
        return false;
    }

    llvm_unreachable("I1 and I2 not found in BB");
  }
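  // Note that firstInBB is a linear scan of BB, so callers that invoke it
  // repeatedly on large blocks pay a quadratic cost overall.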
  // Return true when there are memory uses of Def in BB.
  bool hasMemoryUseOnPath(MemoryAccess *Def, const BasicBlock *BB,
                          const Instruction *OldPt) {
    const BasicBlock *DefBB = Def->getBlock();
    const BasicBlock *OldBB = OldPt->getParent();

    for (User *U : Def->users())
      if (auto *MU = dyn_cast<MemoryUse>(U)) {
        BasicBlock *UBB = MU->getBlock();
        // Only analyze uses in BB.
        if (BB != UBB)
          continue;

        // A use in the same block as the Def is on the path.
        if (UBB == DefBB) {
          assert(MSSA->locallyDominates(Def, MU) && "def not dominating use");
          return true;
        }

        if (UBB != OldBB)
          return true;

        // It is only harmful to hoist when the use is before OldPt.
        if (firstInBB(UBB, MU->getMemoryInst(), OldPt))
          return true;
      }

    return false;
  }
  // Return true when there is exception handling or a load of memory Def
  // between OldPt and NewPt.

  // Decrement by 1 NBBsOnAllPaths for each block between HoistPt and BB, and
  // return true when the counter NBBsOnAllPaths reaches 0, except when it is
  // initialized to -1 which is unlimited.
  bool hasEHOrLoadsOnPath(const Instruction *NewPt, const Instruction *OldPt,
                          MemoryAccess *Def, int &NBBsOnAllPaths) {
    const BasicBlock *NewBB = NewPt->getParent();
    const BasicBlock *OldBB = OldPt->getParent();
    assert(DT->dominates(NewBB, OldBB) && "invalid path");
    assert(DT->dominates(Def->getBlock(), NewBB) &&
           "def does not dominate new hoisting point");

    // Walk all basic blocks reachable in depth-first iteration on the inverse
    // CFG from OldBB to NewBB. These blocks are all the blocks that may be
    // executed between the execution of NewBB and OldBB. Hoisting an
    // expression from OldBB into NewBB has to be safe on all execution paths.
    for (auto I = idf_begin(OldBB), E = idf_end(OldBB); I != E;) {
      if (*I == NewBB) {
        // Stop traversal when reaching HoistPt.
        I.skipChildren();
        continue;
      }

      // Impossible to hoist with exceptions on the path.
      if (hasEH(*I))
        return true;

      // Check that we do not move a store past loads.
      if (hasMemoryUseOnPath(Def, *I, OldPt))
        return true;

      // Stop walk once the limit is reached.
      if (NBBsOnAllPaths == 0)
        return true;

      // -1 is unlimited number of blocks on all paths.
      if (NBBsOnAllPaths != -1)
        --NBBsOnAllPaths;

      ++I;
    }

    return false;
  }
  // Return true when there is exception handling between HoistPt and BB.
  // Decrement by 1 NBBsOnAllPaths for each block between HoistPt and BB, and
  // return true when the counter NBBsOnAllPaths reaches 0, except when it is
  // initialized to -1 which is unlimited.
  bool hasEHOnPath(const BasicBlock *HoistPt, const BasicBlock *BB,
                   int &NBBsOnAllPaths) {
    assert(DT->dominates(HoistPt, BB) && "Invalid path");

    // Walk all basic blocks reachable in depth-first iteration on the
    // inverse CFG from BBInsn to NewHoistPt. These blocks are all the blocks
    // that may be executed between the execution of NewHoistPt and BBInsn.
    // Hoisting an expression from BBInsn into NewHoistPt has to be safe on
    // all execution paths.
    for (auto I = idf_begin(BB), E = idf_end(BB); I != E;) {
      if (*I == HoistPt) {
        // Stop traversal when reaching NewHoistPt.
        I.skipChildren();
        continue;
      }

      // Impossible to hoist with exceptions on the path.
      if (hasEH(*I))
        return true;

      // Stop walk once the limit is reached.
      if (NBBsOnAllPaths == 0)
        return true;

      // -1 is unlimited number of blocks on all paths.
      if (NBBsOnAllPaths != -1)
        --NBBsOnAllPaths;

      ++I;
    }

    return false;
  }
  // Return true when it is safe to hoist a memory load or store U from OldPt
  // to NewPt.
  bool safeToHoistLdSt(const Instruction *NewPt, const Instruction *OldPt,
                       MemoryUseOrDef *U, InsKind K, int &NBBsOnAllPaths) {
    // In place hoisting is safe.
    if (NewPt == OldPt)
      return true;

    const BasicBlock *NewBB = NewPt->getParent();
    const BasicBlock *OldBB = OldPt->getParent();
    const BasicBlock *UBB = U->getBlock();

    // Check for dependences on the Memory SSA.
    MemoryAccess *D = U->getDefiningAccess();
    BasicBlock *DBB = D->getBlock();
    if (DT->properlyDominates(NewBB, DBB))
      // Cannot move the load or store to NewBB above its definition in DBB.
      return false;

    if (NewBB == DBB && !MSSA->isLiveOnEntryDef(D))
      if (auto *UD = dyn_cast<MemoryUseOrDef>(D))
        if (firstInBB(DBB, NewPt, UD->getMemoryInst()))
          // Cannot move the load or store to NewPt above its definition in D.
          return false;

    // Check for unsafe hoistings due to side effects.
    if (K == InsKind::Store) {
      if (hasEHOrLoadsOnPath(NewPt, OldPt, D, NBBsOnAllPaths))
        return false;
    } else if (hasEHOnPath(NewBB, OldBB, NBBsOnAllPaths))
      return false;

    if (UBB == NewBB) {
      if (DT->properlyDominates(DBB, NewBB))
        return true;
      assert(UBB == DBB);
      assert(MSSA->locallyDominates(D, U));
    }

    // No side effects: it is safe to hoist.
    return true;
  }
  // Return true when it is safe to hoist scalar instructions from BB1 and BB2
  // to HoistBB.
  bool safeToHoistScalar(const BasicBlock *HoistBB, const BasicBlock *BB1,
                         const BasicBlock *BB2, int &NBBsOnAllPaths) {
    // Check that the hoisted expression is needed on all paths. When HoistBB
    // already contains an instruction to be hoisted, the expression is needed
    // on all paths. Enable scalar hoisting at -Oz as it is safe to hoist
    // scalars to a place where they are partially needed.
    if (!OptForMinSize && BB1 != HoistBB &&
        !hoistingFromAllPaths(HoistBB, BB1, BB2))
      return false;

    if (hasEHOnPath(HoistBB, BB1, NBBsOnAllPaths) ||
        hasEHOnPath(HoistBB, BB2, NBBsOnAllPaths))
      return false;

    // Safe to hoist scalars from BB1 and BB2 to HoistBB.
    return true;
  }
  // Each element of a hoisting list contains the basic block into which to
  // hoist and the list of instructions to be hoisted.
  typedef std::pair<BasicBlock *, SmallVecInsn> HoistingPointInfo;
  typedef SmallVector<HoistingPointInfo, 4> HoistingPointList;
  // Partition InstructionsToHoist into a set of candidates which can share a
  // common hoisting point. The partitions are collected in HPL. K tells
  // whether the instructions to be hoisted are scalars, loads, or stores.
  void partitionCandidates(SmallVecImplInsn &InstructionsToHoist,
                           HoistingPointList &HPL, InsKind K) {
    // No need to sort for two instructions.
    if (InstructionsToHoist.size() > 2) {
      SortByDFSIn Pred(DFSNumber);
      std::sort(InstructionsToHoist.begin(), InstructionsToHoist.end(), Pred);
    }

    int NBBsOnAllPaths = MaxNumberOfBBSInPath;

    SmallVecImplInsn::iterator II = InstructionsToHoist.begin();
    SmallVecImplInsn::iterator Start = II;
    Instruction *HoistPt = *II;
    BasicBlock *HoistBB = HoistPt->getParent();
    MemoryUseOrDef *UD;
    if (K != InsKind::Scalar)
      UD = cast<MemoryUseOrDef>(MSSA->getMemoryAccess(HoistPt));

    for (++II; II != InstructionsToHoist.end(); ++II) {
      Instruction *Insn = *II;
      BasicBlock *BB = Insn->getParent();
      BasicBlock *NewHoistBB;
      Instruction *NewHoistPt;

      if (BB == HoistBB) {
        NewHoistBB = HoistBB;
        NewHoistPt = firstInBB(BB, Insn, HoistPt) ? Insn : HoistPt;
      } else {
        NewHoistBB = DT->findNearestCommonDominator(HoistBB, BB);
        if (NewHoistBB == BB)
          NewHoistPt = Insn;
        else if (NewHoistBB == HoistBB)
          NewHoistPt = HoistPt;
        else
          NewHoistPt = NewHoistBB->getTerminator();
      }

      if (K == InsKind::Scalar) {
        if (safeToHoistScalar(NewHoistBB, HoistBB, BB, NBBsOnAllPaths)) {
          // Extend HoistPt to NewHoistPt.
          HoistPt = NewHoistPt;
          HoistBB = NewHoistBB;
          continue;
        }
      } else {
        // When NewHoistBB already contains an instruction to be hoisted, the
        // expression is needed on all paths.
        // Check that the hoisted expression is needed on all paths: it is
        // unsafe to hoist loads to a place where there may be a path not
        // loading from the same address: for instance there may be a branch
        // on which the address of the load may not be initialized.
        if ((HoistBB == NewHoistBB || BB == NewHoistBB ||
             hoistingFromAllPaths(NewHoistBB, HoistBB, BB)) &&
            // Also check that it is safe to move the load or store from
            // HoistPt to NewHoistPt, and from Insn to NewHoistPt.
            safeToHoistLdSt(NewHoistPt, HoistPt, UD, K, NBBsOnAllPaths) &&
            safeToHoistLdSt(NewHoistPt, Insn,
                            cast<MemoryUseOrDef>(MSSA->getMemoryAccess(Insn)),
                            K, NBBsOnAllPaths)) {
          // Extend HoistPt to NewHoistPt.
          HoistPt = NewHoistPt;
          HoistBB = NewHoistBB;
          continue;
        }
      }

      // At this point it is not safe to extend the current hoisting to
      // NewHoistPt: save the hoisting list so far.
      if (std::distance(Start, II) > 1)
        HPL.push_back({HoistBB, SmallVecInsn(Start, II)});

      // Start over from BB.
      Start = II;
      if (K != InsKind::Scalar)
        UD = cast<MemoryUseOrDef>(MSSA->getMemoryAccess(*Start));
      HoistPt = Insn;
      HoistBB = BB;
      NBBsOnAllPaths = MaxNumberOfBBSInPath;
    }

    // Save the last partition.
    if (std::distance(Start, II) > 1)
      HPL.push_back({HoistBB, SmallVecInsn(Start, II)});
  }
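  // As an illustration, with candidates I1, I2, I3 sorted in DFS order, when
  // I1 and I2 can share a hoisting point H but extending the run to I3 is
  // unsafe, HPL receives the partition {H, [I1, I2]} and a new partition is
  // started at I3.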
  // Initialize HPL from Map.
  void computeInsertionPoints(const VNtoInsns &Map, HoistingPointList &HPL,
                              InsKind K) {
    for (const auto &Entry : Map) {
      if (MaxHoistedThreshold != -1 && ++HoistedCtr > MaxHoistedThreshold)
        return;

      const SmallVecInsn &V = Entry.second;
      if (V.size() < 2)
        continue;

      // Compute the insertion point and the list of expressions to be
      // hoisted.
      SmallVecInsn InstructionsToHoist;
      for (auto I : V)
        if (!hasEH(I->getParent()))
          InstructionsToHoist.push_back(I);

      if (!InstructionsToHoist.empty())
        partitionCandidates(InstructionsToHoist, HPL, K);
    }
  }
  // Return true when all operands of I are available at insertion point
  // HoistPt. When limiting the number of hoisted expressions, one could hoist
  // a load without hoisting its access function. So before hoisting any
  // expression, make sure that all its operands are available at the insert
  // point.
  bool allOperandsAvailable(const Instruction *I,
                            const BasicBlock *HoistPt) const {
    for (const Use &Op : I->operands())
      if (const auto *Inst = dyn_cast<Instruction>(&Op))
        if (!DT->dominates(Inst->getParent(), HoistPt))
          return false;

    return true;
  }
  // Return the one of I and J that appears first in their common basic block.
  Instruction *firstOfTwo(Instruction *I, Instruction *J) const {
    for (Instruction &I1 : *I->getParent())
      if (&I1 == I || &I1 == J)
        return &I1;
    llvm_unreachable("Both I and J must be from same BB");
  }
  // Replace the use of From with To in Insn.
  void replaceUseWith(Instruction *Insn, Value *From, Value *To) const {
    for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
         UI != UE;) {
      Use &U = *UI++;
      if (U.getUser() == Insn) {
        U.set(To);
        return;
      }
    }
    llvm_unreachable("should replace exactly once");
  }
  bool makeOperandsAvailable(Instruction *Repl, BasicBlock *HoistPt) const {
    // Check whether the GEP of a ld/st can be synthesized at HoistPt.
    GetElementPtrInst *Gep = nullptr;
    Instruction *Val = nullptr;
    if (auto *Ld = dyn_cast<LoadInst>(Repl))
      Gep = dyn_cast<GetElementPtrInst>(Ld->getPointerOperand());
    if (auto *St = dyn_cast<StoreInst>(Repl)) {
      Gep = dyn_cast<GetElementPtrInst>(St->getPointerOperand());
      Val = dyn_cast<Instruction>(St->getValueOperand());
      // Check that the stored value is available.
      if (Val) {
        if (isa<GetElementPtrInst>(Val)) {
          // Check whether we can compute the GEP at HoistPt.
          if (!allOperandsAvailable(Val, HoistPt))
            return false;
        } else if (!DT->dominates(Val->getParent(), HoistPt))
          return false;
      }
    }

    // Check whether we can compute the Gep at HoistPt.
    if (!Gep || !allOperandsAvailable(Gep, HoistPt))
      return false;

    // Copy the gep before moving the ld/st.
    Instruction *ClonedGep = Gep->clone();
    ClonedGep->insertBefore(HoistPt->getTerminator());
    replaceUseWith(Repl, Gep, ClonedGep);

    // Also copy Val when it is a GEP.
    if (Val && isa<GetElementPtrInst>(Val)) {
      Instruction *ClonedVal = Val->clone();
      ClonedVal->insertBefore(HoistPt->getTerminator());
      replaceUseWith(Repl, Val, ClonedVal);
    }

    return true;
  }
  std::pair<unsigned, unsigned> hoist(HoistingPointList &HPL) {
    unsigned NI = 0, NL = 0, NS = 0, NC = 0, NR = 0;
    for (const HoistingPointInfo &HP : HPL) {
      // Find out whether we already have one of the instructions in HoistPt,
      // in which case we do not have to move it.
      BasicBlock *HoistPt = HP.first;
      const SmallVecInsn &InstructionsToHoist = HP.second;
      Instruction *Repl = nullptr;
      for (Instruction *I : InstructionsToHoist)
        if (I->getParent() == HoistPt) {
          // If there are two instructions in HoistPt to be hoisted in place:
          // update Repl to be the first one, such that we can rename the uses
          // of the second based on the first.
          Repl = !Repl ? I : firstOfTwo(Repl, I);
        }

      if (Repl) {
        // Repl is already in HoistPt: it remains in place.
        assert(allOperandsAvailable(Repl, HoistPt) &&
               "instruction depends on operands that are not available");
      } else {
        // When we do not find Repl in HoistPt, select the first in the list
        // and move it to HoistPt.
        Repl = InstructionsToHoist.front();

        // We can move Repl in HoistPt only when all operands are available.
        // The order in which hoistings are done may influence the
        // availability of operands.
        if (!allOperandsAvailable(Repl, HoistPt) &&
            !makeOperandsAvailable(Repl, HoistPt))
          continue;
        Repl->moveBefore(HoistPt->getTerminator());
      }

      if (isa<LoadInst>(Repl))
        ++NL;
      else if (isa<StoreInst>(Repl))
        ++NS;
      else if (isa<CallInst>(Repl))
        ++NC;
      else // Scalar
        ++NI;

      // Remove and rename all other instructions.
      for (Instruction *I : InstructionsToHoist)
        if (I != Repl) {
          ++NR;
          if (isa<LoadInst>(Repl))
            ++NumLoadsRemoved;
          else if (isa<StoreInst>(Repl))
            ++NumStoresRemoved;
          else if (isa<CallInst>(Repl))
            ++NumCallsRemoved;
          I->replaceAllUsesWith(Repl);
          I->eraseFromParent();
        }
    }

    NumHoisted += NL + NS + NC + NI;
    NumRemoved += NR;
    NumLoadsHoisted += NL;
    NumStoresHoisted += NS;
    NumCallsHoisted += NC;
    return {NI, NL + NC + NS};
  }
  // Hoist all expressions. Returns the number of scalars hoisted and the
  // number of non-scalars hoisted.
  std::pair<unsigned, unsigned> hoistExpressions(Function &F) {
    InsnInfo II;
    LoadInfo LI;
    StoreInfo SI;
    CallInfo CI;
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) {
      for (Instruction &I1 : *BB) {
        if (auto *Load = dyn_cast<LoadInst>(&I1))
          LI.insert(Load, VN);
        else if (auto *Store = dyn_cast<StoreInst>(&I1))
          SI.insert(Store, VN);
        else if (auto *Call = dyn_cast<CallInst>(&I1)) {
          if (auto *Intr = dyn_cast<IntrinsicInst>(Call)) {
            if (isa<DbgInfoIntrinsic>(Intr) ||
                Intr->getIntrinsicID() == Intrinsic::assume)
              continue;
          }
          if (Call->mayHaveSideEffects()) {
            if (!OptForMinSize)
              break;
            // We may continue hoisting across calls which write to memory.
            if (Call->mayThrow())
              break;
          }
          CI.insert(Call, VN);
        } else if (OptForMinSize || !isa<GetElementPtrInst>(&I1))
          // Do not hoist scalars past calls that may write to memory because
          // that could result in spills later. geps are handled separately.
          // TODO: We can relax this for targets like AArch64 as they have
          // more registers than X86.
          II.insert(&I1, VN);
      }
    }

    HoistingPointList HPL;
    computeInsertionPoints(II.getVNTable(), HPL, InsKind::Scalar);
    computeInsertionPoints(LI.getVNTable(), HPL, InsKind::Load);
    computeInsertionPoints(SI.getVNTable(), HPL, InsKind::Store);
    computeInsertionPoints(CI.getScalarVNTable(), HPL, InsKind::Scalar);
    computeInsertionPoints(CI.getLoadVNTable(), HPL, InsKind::Load);
    computeInsertionPoints(CI.getStoreVNTable(), HPL, InsKind::Store);
    return hoist(HPL);
  }
  bool run(Function &F) {
    VN.setDomTree(DT);
    VN.setAliasAnalysis(AA);
    VN.setMemDep(MD);
    bool Res = false;

    unsigned I = 0;
    for (const BasicBlock *BB : depth_first(&F.getEntryBlock()))
      DFSNumber.insert({BB, ++I});

    // FIXME: use lazy evaluation of VN to avoid the fix-point computation.
    while (1) {
      // FIXME: only compute MemorySSA once. We need to update the analysis
      // at the same time as we transform the code.
      MemorySSA M(F, AA, DT);
      MSSA = &M;

      auto HoistStat = hoistExpressions(F);
      if (HoistStat.first + HoistStat.second == 0) {
        return Res;
      }
      if (HoistStat.second > 0) {
        // To address a limitation of the current GVN, we need to rerun the
        // hoisting after we hoisted loads in order to be able to hoist all
        // scalars dependent on the hoisted loads. Same for stores.
        VN.clear();
      }
      Res = true;
    }

    return Res;
  }
};
class GVNHoistLegacyPass : public FunctionPass {
public:
  static char ID;

  GVNHoistLegacyPass() : FunctionPass(ID) {
    initializeGVNHoistLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto &MD = getAnalysis<MemoryDependenceWrapperPass>().getMemDep();

    GVNHoist G(&DT, &AA, &MD, F.optForMinSize());
    return G.run(F);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }
};
} // namespace
PreservedAnalyses GVNHoistPass::run(Function &F,
                                    AnalysisManager<Function> &AM) {
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);

  GVNHoist G(&DT, &AA, &MD, F.optForMinSize());
  if (!G.run(F))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}
char GVNHoistLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(GVNHoistLegacyPass, "gvn-hoist",
                      "Early GVN Hoisting of Expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(GVNHoistLegacyPass, "gvn-hoist",
                    "Early GVN Hoisting of Expressions", false, false)

FunctionPass *llvm::createGVNHoistPass() { return new GVNHoistLegacyPass(); }
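
// A minimal usage sketch (illustrative; assumes llvm/IR/LegacyPassManager.h
// is available to the caller):
//
//   llvm::legacy::PassManager PM;
//   PM.add(llvm::createGVNHoistPass());
//   PM.run(M); // M is the llvm::Module to optimize.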