//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <deque>
#include <memory>
#include <utility>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSECVP, "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");

DEBUG_COUNTER(CSECounter, "early-cse",
              "Controls which instructions are removed");
//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//
namespace {

/// Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
           isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
  }
};

} // end anonymous namespace
namespace llvm {

template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};

} // end namespace llvm
unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    if (Inst->getOperand(0) > Inst->getOperand(1)) {
      std::swap(LHS, RHS);
      Pred = CI->getSwappedPredicate();
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  // Hash min/max/abs (cmp + select) to allow for commuted operands.
  // Min/max may also have non-canonical compare predicate (eg, the compare for
  // smin may use 'sgt' rather than 'slt'), and non-canonical operands in the
  // compare.
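  // As an illustrative (hypothetical) IR pair, both of the following compute
  // smin(%a, %b) and should receive the same hash despite the swapped
  // predicate and commuted compare operands:
  //   %cmp1 = icmp slt i32 %a, %b
  //   %min1 = select i1 %cmp1, i32 %a, i32 %b
  //   %cmp2 = icmp sgt i32 %b, %a
  //   %min2 = select i1 %cmp2, i32 %a, i32 %b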
  Value *A, *B;
  SelectPatternFlavor SPF = matchSelectPattern(Inst, A, B).Flavor;
  // TODO: We should also detect FP min/max.
  if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
      SPF == SPF_UMIN || SPF == SPF_UMAX) {
    if (A > B)
      std::swap(A, B);
    return hash_combine(Inst->getOpcode(), SPF, A, B);
  }
  if (SPF == SPF_ABS || SPF == SPF_NABS) {
    // ABS/NABS always puts the input in A and its negation in B.
    return hash_combine(Inst->getOpcode(), SPF, A, B);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
          isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}
bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If we're not strictly identical, we still might be a commutable instruction.
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality with a swapped predicate.
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }
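  // For example (illustrative IR, not taken from a test), the swapped-operand
  // check above treats these two compares as equal:
  //   %c1 = icmp slt i32 %x, %y
  //   %c2 = icmp sgt i32 %y, %x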
  // Min/max/abs can occur with commuted operands, non-canonical predicates,
  // and/or non-canonical operands.
  Value *LHSA, *LHSB;
  SelectPatternFlavor LSPF = matchSelectPattern(LHSI, LHSA, LHSB).Flavor;
  // TODO: We should also detect FP min/max.
  if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
      LSPF == SPF_UMIN || LSPF == SPF_UMAX ||
      LSPF == SPF_ABS || LSPF == SPF_NABS) {
    Value *RHSA, *RHSB;
    SelectPatternFlavor RSPF = matchSelectPattern(RHSI, RHSA, RHSB).Flavor;
    if (LSPF == RSPF) {
      // Abs results are placed in a defined order by matchSelectPattern.
      if (LSPF == SPF_ABS || LSPF == SPF_NABS)
        return LHSA == RHSA && LHSB == RHSB;
      return ((LHSA == RHSA && LHSB == RHSB) ||
              (LHSA == RHSB && LHSB == RHSA));
    }
  }

  return false;
}
//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//
namespace {

/// Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};

} // end anonymous namespace
namespace llvm {

template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};

} // end namespace llvm
unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash all of the operands as pointers and mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}
//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//
namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
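///
/// For example (an illustrative snippet), given:
///
///   %a = add i32 %x, %y
///   ...
///   %b = add i32 %x, %y
///
/// assuming %a dominates %b, the walk records %a in a scoped hash table, the
/// lookup for %b hits %a, all uses of %b are rewritten to %a, and %b is erased.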
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  const SimplifyQuery SQ;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;

  using AllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<SimpleValue, Value *>>;
  using ScopedHTType =
      ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                      AllocatorTy>;

  /// A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// A scoped hash table of the current values of previously encountered
  /// memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores when
  /// we have a fully redundant load. In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count. The current generation count is incremented
  /// after every possibly writing memory operation, which ensures that we only
  /// CSE loads with other loads that have no intervening store. Ordering
  /// events (such as fences or atomic instructions) increment the generation
  /// count as well; essentially, we model these as writes to all possible
  /// locations. Note that atomic and/or volatile loads and stores can be
  /// present in the table; it is the responsibility of the consumer to inspect
  /// the atomicity/volatility if needed.
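  ///
  /// For example (illustrative IR), with the current generation G at the
  /// first load:
  ///
  ///   %v1 = load i32, i32* %p    ; recorded in the table at generation G
  ///   store i32 0, i32* %q       ; may-write: generation becomes G+1
  ///   %v2 = load i32, i32* %p    ; lookup finds %v1, but the generations
  ///                              ; differ, so the loads are only combined if
  ///                              ; MemorySSA can prove no intervening clobber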
  struct LoadValue {
    Instruction *DefInst = nullptr;
    unsigned Generation = 0;
    int MatchingId = -1;
    bool IsAtomic = false;

    LoadValue() = default;
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic) {}
  };
  using LoadMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<Value *, LoadValue>>;
  using LoadHTType =
      ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                      LoadMapAllocator>;

  LoadHTType AvailableLoads;

  // A scoped hash table mapping memory locations (represented as typed
  // addresses) to generation numbers at which that memory location became
  // (henceforth indefinitely) invariant.
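  //
  // For example (illustrative IR), once
  //   call {}* @llvm.invariant.start.p0i8(i64 4, i8* %p)
  // is seen at generation G, the location of %p maps to G, and any later
  // access to it at a generation >= G may be treated as reading invariant
  // memory.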
  using InvariantMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<MemoryLocation, unsigned>>;
  using InvariantHTType =
      ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
                      InvariantMapAllocator>;
  InvariantHTType AvailableInvariants;

  /// A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  using CallHTType =
      ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
  CallHTType AvailableCalls;

  /// This is the current generation of the memory value.
  unsigned CurrentGeneration = 0;
  /// Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(llvm::make_unique<MemorySSAUpdater>(MSSA)) {}

  bool run();

private:
  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

  private:
    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
  };
  // Contains all the needed information to create a stack for doing a depth
  // first traversal of the tree. This includes scopes for values, loads, and
  // calls as well as the generation. There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              unsigned cg, DomTreeNode *n, DomTreeNode::iterator child,
              DomTreeNode::iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end),
          Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
                 AvailableCalls)
          {}
    StackNode(const StackNode &) = delete;
    StackNode &operator=(const StackNode &) = delete;

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }

    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }

    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed = false;
  };
  /// Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
        : Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
        if (TTI.getTgtMemIntrinsic(II, Info))
          IsTargetMemInst = true;
    }

    bool isLoad() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }

    bool isStore() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }

    bool isAtomic() const {
      if (IsTargetMemInst)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }

    bool isUnordered() const {
      if (IsTargetMemInst)
        return Info.isUnordered();

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IsTargetMemInst)
        return Info.IsVolatile;

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
      return false;
    }

    bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
      return (getPointerOperand() == Inst.getPointerOperand() &&
              getMatchingId() == Inst.getMatchingId());
    }

    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure. That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IsTargetMemInst) return Info.MatchingId;
      return -1;
    }

    Value *getPointerOperand() const {
      if (IsTargetMemInst) return Info.PtrVal;
      return getLoadStorePointerOperand(Inst);
    }

    bool mayReadFromMemory() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }

    bool mayWriteToMemory() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    bool IsTargetMemInst = false;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };
  bool processNode(DomTreeNode *Node);

  bool handleBranchCondition(Instruction *CondInst, const BranchInst *BI,
                             const BasicBlock *BB, const BasicBlock *Pred);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }
  /// Return true if the instruction is known to only operate on memory
  /// provably invariant in the given "generation".
  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  void removeMSSA(Instruction *Inst) {
    if (!MSSA)
      return;
    // Removing a store here can leave MemorySSA in an unoptimized state by
    // creating MemoryPhis that have identical arguments and by creating
    // MemoryUses whose defining access is not an actual clobber. We handle the
    // phi case eagerly here. The non-optimized MemoryUse case is lazily
    // updated by MemorySSA getClobberingMemoryAccess.
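    //
    // For example (an illustrative MemorySSA fragment), a phi such as
    //   2 = MemoryPhi({bb1,1},{bb2,1})
    // has all-identical incoming accesses, so it can itself be removed and
    // re-checked transitively; the loop below does exactly that.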
    if (MemoryAccess *MA = MSSA->getMemoryAccess(Inst)) {
      // Optimize MemoryPhi nodes that may become redundant by having all the
      // same input values once MA is removed.
      SmallSetVector<MemoryPhi *, 4> PhisToCheck;
      SmallVector<MemoryAccess *, 8> WorkQueue;
      WorkQueue.push_back(MA);
      // Process MemoryPhi nodes in FIFO order using an ever-growing vector
      // since we shouldn't be processing that many phis and this will avoid
      // an allocation in almost all cases.
      for (unsigned I = 0; I < WorkQueue.size(); ++I) {
        MemoryAccess *WI = WorkQueue[I];

        for (auto *U : WI->users())
          if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U))
            PhisToCheck.insert(MP);

        MSSAUpdater->removeMemoryAccess(WI);

        for (MemoryPhi *MP : PhisToCheck) {
          MemoryAccess *FirstIn = MP->getIncomingValue(0);
          if (llvm::all_of(MP->incoming_values(),
                           [=](Use &In) { return In == FirstIn; }))
            WorkQueue.push_back(MP);
        }
        PhisToCheck.clear();
      }
    }
  }
};

} // end anonymous namespace
/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// in both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  if (!MSSA)
    return false;

  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
  // read/write memory, then we can safely return true here.
  // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
  // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
  // by also checking the MemorySSA MemoryAccess on the instruction. Initial
  // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
  // with the default optimization pipeline.
  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
  if (!EarlierMA)
    return true;
  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
  if (!LaterMA)
    return true;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  MemoryAccess *LaterDef =
      MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
  return MSSA->dominates(LaterDef, EarlierMA);
}
bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // A location loaded from with an invariant_load is assumed to *never* change
  // within the visible scope of the compilation.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (LI->getMetadata(LLVMContext::MD_invariant_load))
      return true;

  auto MemLocOpt = MemoryLocation::getOrNone(I);
  if (!MemLocOpt)
    // "target" intrinsic forms of loads aren't currently known to
    // MemoryLocation::get. TODO
    return false;
  MemoryLocation MemLoc = *MemLocOpt;
  if (!AvailableInvariants.count(MemLoc))
    return false;

  // Is the generation at which this became invariant older than the
  // current one?
  return AvailableInvariants.lookup(MemLoc) <= GenAt;
}
bool EarlyCSE::handleBranchCondition(Instruction *CondInst,
                                     const BranchInst *BI, const BasicBlock *BB,
                                     const BasicBlock *Pred) {
  assert(BI->isConditional() && "Should be a conditional branch!");
  assert(BI->getCondition() == CondInst && "Wrong condition?");
  assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
  auto *TorF = (BI->getSuccessor(0) == BB)
                   ? ConstantInt::getTrue(BB->getContext())
                   : ConstantInt::getFalse(BB->getContext());
  auto MatchBinOp = [](Instruction *I, unsigned Opcode) {
    if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(I))
      return BOp->getOpcode() == Opcode;
    return false;
  };
  // If the condition is an AND operation, we can propagate its operands into
  // the true branch. If it is an OR operation, we can propagate them into the
  // false branch.
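  //
  // For example (illustrative IR), given:
  //
  //   %c = and i1 %c1, %c2
  //   br i1 %c, label %taken, label %untaken
  //
  // inside %taken the values %c, %c1, and %c2 are all known true, so all
  // three are worth recording and propagating.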
  unsigned PropagateOpcode =
      (BI->getSuccessor(0) == BB) ? Instruction::And : Instruction::Or;

  bool MadeChanges = false;
  SmallVector<Instruction *, 4> WorkList;
  SmallPtrSet<Instruction *, 4> Visited;
  WorkList.push_back(CondInst);
  while (!WorkList.empty()) {
    Instruction *Curr = WorkList.pop_back_val();

    AvailableValues.insert(Curr, TorF);
    LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                      << Curr->getName() << "' as " << *TorF << " in "
                      << BB->getName() << "\n");
    if (!DebugCounter::shouldExecute(CSECounter)) {
      LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
    } else {
      // Replace all dominated uses with the known value.
      if (unsigned Count = replaceDominatedUsesWith(Curr, TorF, DT,
                                                    BasicBlockEdge(Pred, BB))) {
        MadeChanges = true;
        NumCSECVP += Count;
      }
    }

    if (MatchBinOp(Curr, PropagateOpcode))
      for (auto &Op : cast<BinaryOperator>(Curr)->operands())
        if (Instruction *OPI = dyn_cast<Instruction>(Op))
          if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second)
            WorkList.push_back(OPI);
  }

  return MadeChanges;
}
bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live out memory values are still current
  // in this block. If this block has multiple predecessors, then they could
  // have invalidated the live-out memory values of our parent block. For now,
  // just be conservative and invalidate memory if this block has multiple
  // predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path. We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value. Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
    if (BI && BI->isConditional()) {
      auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
      if (CondInst && SimpleValue::canHandle(CondInst))
        Changed |= handleBranchCondition(CondInst, BI, BB, Pred);
    }
  }
  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory. If we see a store
  /// to the same location, we delete the dead store. This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;
  // See if any instructions in the block can be eliminated. If so, do it. If
  // not, add them to AvailableValues.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;

    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(Inst, &TLI)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
        continue;
      }
      salvageDebugInfo(*Inst);
      removeMSSA(Inst);
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }
    // Skip assume intrinsics, they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not bother with its removal. However, we should mark
    // its condition as true for all dominated blocks.
    if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
      auto *CondI =
          dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
      if (CondI && SimpleValue::canHandle(CondI)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst
                          << '\n');
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      } else
        LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
      continue;
    }

    // Skip sideeffect intrinsics, for the same reason as assume intrinsics.
    if (match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n');
      continue;
    }
    // We can skip all invariant.start intrinsics since they only read memory,
    // and we can forward values across it. For invariant starts without
    // invariant ends, we can use the fact that the invariantness never ends to
    // start a scope in the current generation which is true for all future
    // generations. Also, we don't need to consume the last store since the
    // semantics of invariant.start allow us to perform DSE of the last
    // store, if there was a store following invariant.start. Consider:
    //
    // store 30, i8* p
    // invariant.start(p)
    // store 40, i8* p
    // We can DSE the store to 30, since the store 40 to invariant location p
    // causes undefined behaviour.
    if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
      // If there are any uses, the scope might end.
      if (!Inst->use_empty())
        continue;
      auto *CI = cast<CallInst>(Inst);
      MemoryLocation MemLoc = MemoryLocation::getForArgument(CI, 1, TLI);
      // Don't start a scope if we already have a better one pushed
      if (!AvailableInvariants.count(MemLoc))
        AvailableInvariants.insert(MemLoc, CurrentGeneration);
      continue;
    }
    if (match(Inst, m_Intrinsic<Intrinsic::experimental_guard>())) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
        if (SimpleValue::canHandle(CondI)) {
          // Do we already know the actual value of this condition?
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
            // Is the condition known to be true?
            if (isa<ConstantInt>(KnownCond) &&
                cast<ConstantInt>(KnownCond)->isOne()) {
              LLVM_DEBUG(dbgs()
                         << "EarlyCSE removing guard: " << *Inst << '\n');
              removeMSSA(Inst);
              Inst->eraseFromParent();
              Changed = true;
              continue;
            } else
              // Use the known value if it wasn't true.
              cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
          }

          // The condition we're guarding on here is true for all dominated
          // blocks.
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
        }
      }

      // Guard intrinsics read all memory, but don't write any memory.
      // Accordingly, don't update the generation but consume the last store (to
      // avoid an incorrect DSE).
      LastStore = nullptr;
      continue;
    }
    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(Inst, SQ)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V
                        << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
      } else {
        bool Killed = false;
        if (!Inst->use_empty()) {
          Inst->replaceAllUsesWith(V);
          Changed = true;
        }
        if (isInstructionTriviallyDead(Inst, &TLI)) {
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          Killed = true;
        }
        if (Changed)
          ++NumSimplify;
        if (Killed)
          continue;
      }
    }
    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(Inst)) {
      // See if the instruction has an available value. If so, use it.
      if (Value *V = AvailableValues.lookup(Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V
                          << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (auto *I = dyn_cast<Instruction>(V))
          I->andIRFlags(Inst);
        Inst->replaceAllUsesWith(V);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(Inst, Inst);
      continue;
    }
    ParseMemoryInst MemInst(Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // (conservatively) we can't peek past the ordering implied by this
      // operation, but we can add this load to our set of available values
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      if (MemInst.isInvariantLoad()) {
        // If we pass an invariant load, we know that memory location is
        // indefinitely constant from the moment of first dereferenceability.
        // We conservatively treat the invariant_load as that moment. If we
        // pass an invariant load after already establishing a scope, don't
        // restart it since we want to preserve the earliest point seen.
        auto MemLoc = MemoryLocation::get(Inst);
        if (!AvailableInvariants.count(MemLoc))
          AvailableInvariants.insert(MemLoc, CurrentGeneration);
      }

      // If we have an available version of this load, and if it is the right
      // generation or the load is known to be from an invariant location,
      // replace this instruction.
      //
      // If either the dominating load or the current load are invariant, then
      // we can assume the current load loads the same value as the dominating
      // load.
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst != nullptr &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing loads with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          // We can't replace an atomic load with one which isn't also atomic.
          InVal.IsAtomic >= MemInst.isAtomic() &&
          (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
        if (Op != nullptr) {
          LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
                            << " to: " << *InVal.DefInst << '\n');
          if (!DebugCounter::shouldExecute(CSECounter)) {
            LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            continue;
          }
          if (!Inst->use_empty())
            Inst->replaceAllUsesWith(Op);
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(
          MemInst.getPointerOperand(),
          LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                    MemInst.isAtomic()));
      LastStore = nullptr;
      continue;
    }
    // If this instruction may read from memory or throw (and potentially read
    // from memory in the exception handler), forget LastStore. Load/store
    // intrinsics will indicate both a read and a write to memory. The target
    // may override this (e.g. so that a store intrinsic does not read from
    // memory, and thus will be treated the same as a regular store for
    // commoning purposes).
    if ((Inst->mayReadFromMemory() || Inst->mayThrow()) &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;
    // If this is a read-only call, process it.
    if (CallValue::canHandle(Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
                          << " to: " << *InVal.first << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(InVal.first);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(
          Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
      continue;
    }
    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence. As a
    // result, we don't need to consider it as writing to memory and don't need
    // to advance the generation. We do need to prevent DSE across the fence,
    // but that's handled above.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }
    // write back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write. The primary benefit is in allowing
    // the available load table to remain valid and value forward past where
    // the store originally was.
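    //
    // For example (illustrative IR), assuming nothing clobbers %p in between:
    //
    //   %v = load i32, i32* %p
    //   ...
    //   store i32 %v, i32* %p    ; writes back the value just loaded; removable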
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing stores with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        // It is okay to have a LastStore to a different pointer here if MemorySSA
        // tells us that the load and store are from the same memory generation.
        // In that case, LastStore should keep its present value since we're
        // removing the current store.
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand() ||
                MSSA) &&
               "can't have an intervening store if not using MemorySSA!");
        LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }
    // Okay, this isn't something we can CSE at all. Check to see if it is
    // something that could modify memory. If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst->mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads. Delete the earlier store.
        // At the moment, we don't remove ordered stores, but do remove
        // unordered atomic stores. There's no special requirement (for
        // unordered atomics) about removing atomic stores only in favor of
        // other atomic stores since we're going to execute the non-atomic
        // one anyway and the atomic one might never have become visible.
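        //
        // For example (illustrative IR), with no intervening load:
        //
        //   store i32 1, i32* %p   ; dead: removed when the next store is seen
        //   store i32 2, i32* %p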
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          assert(LastStoreMemInst.isUnordered() &&
                 !LastStoreMemInst.isVolatile() &&
                 "Violated invariant");
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                              << " due to: " << *Inst << '\n');
            if (!DebugCounter::shouldExecute(CSECounter)) {
              LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            } else {
              removeMSSA(LastStore);
              LastStore->eraseFromParent();
              Changed = true;
              ++NumDSE;
              LastStore = nullptr;
            }
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values. Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer. It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads.insert(
            MemInst.getPointerOperand(),
            LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                      MemInst.isAtomic()));

        // Remember that this was the last unordered store we saw for DSE. We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed. We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform. Another option would
        // be to merge the ordering with that of the post dominating store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = Inst;
        else
          LastStore = nullptr;
      }
    }
  }

  return Changed;
}
bool EarlyCSE::run() {
  // Note, deque is being used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
      CurrentGeneration, DT.getRootNode(),
      DT.getRootNode()->begin(), DT.getRootNode()->end()));

  // Save the current generation.
  unsigned LiveOutGeneration = CurrentGeneration;
  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
                        AvailableCalls, NodeToProcess->childGeneration(),
                        child, child->begin(), child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  // Reset the current generation.
  CurrentGeneration = LiveOutGeneration;

  return Changed;
}
PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSA =
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;

  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

  if (!CSE.run())
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (UseMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}
namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
template<bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
    if (UseMemorySSA)
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
    else
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    if (UseMemorySSA) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace
using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

template<>
char EarlyCSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)

using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template<>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)