//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
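//
// For example, in the IR below the second add is trivially redundant and can
// be folded into the first (illustrative example):
//
//   %a = add i32 %x, %y
//   %b = add i32 %x, %y   ; replaced by %a and removed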
13 //===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/MemorySSA.h"
#include <deque>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSECVP, "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {
/// \brief Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
           isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
  }
};
} // end anonymous namespace

namespace llvm {
template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }
  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }
  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};
} // end namespace llvm

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    if (Inst->getOperand(0) > Inst->getOperand(1)) {
      std::swap(LHS, RHS);
      Pred = CI->getSwappedPredicate();
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
          isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}
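
// Note how getHashValue and isEqual cooperate for commutative operations:
// operands are ordered by pointer value before hashing, so e.g. the pair
//   %s1 = add i32 %x, %y
//   %s2 = add i32 %y, %x
// (illustrative IR) hashes identically, and the commuted-operand checks in
// isEqual below then report the two instructions equal.
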
bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If we're not strictly identical, we still might be a commutable instruction.
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality.
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality with a swapped predicate.
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  return false;
}

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {
/// \brief Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};
} // end anonymous namespace

namespace llvm {
template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }
  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }
  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};
} // end namespace llvm

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash all of the operands as pointers and mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {
/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  MemorySSA *MSSA;
  typedef RecyclingAllocator<
      BumpPtrAllocator, ScopedHashTableVal<SimpleValue, Value *>> AllocatorTy;
  typedef ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                          AllocatorTy> ScopedHTType;

  /// \brief A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
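  ///
  /// For example (illustrative), if block A dominates block B:
  ///   A:  %x = add i32 %a, %b     ; inserted into the table
  ///   B:  %y = add i32 %a, %b     ; lookup finds %x, so %y is replaced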
  ScopedHTType AvailableValues;

  /// A scoped hash table of the current values of previously encountered
  /// memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores when
  /// we have a fully redundant load. In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count. The current generation count is incremented
  /// after every possibly writing memory operation, which ensures that we only
  /// CSE loads with other loads that have no intervening store. Ordering
  /// events (such as fences or atomic instructions) increment the generation
  /// count as well; essentially, we model these as writes to all possible
  /// locations. Note that atomic and/or volatile loads and stores can be
  /// present in the table; it is the responsibility of the consumer to inspect
  /// the atomicity/volatility if needed.
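  ///
  /// For example (illustrative):
  ///   %v1 = load i32, i32* %p     ; recorded at generation G
  ///   call void @f()              ; may write memory; generation becomes G+1
  ///   %v2 = load i32, i32* %p     ; seen at generation G+1, not CSE'd with %v1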
  struct LoadValue {
    Instruction *DefInst;
    unsigned Generation;
    int MatchingId;
    bool IsAtomic;
    bool IsInvariant;
    LoadValue()
        : DefInst(nullptr), Generation(0), MatchingId(-1), IsAtomic(false),
          IsInvariant(false) {}
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic, bool IsInvariant)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic), IsInvariant(IsInvariant) {}
  };
  typedef RecyclingAllocator<BumpPtrAllocator,
                             ScopedHashTableVal<Value *, LoadValue>>
      LoadMapAllocator;
  typedef ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                          LoadMapAllocator> LoadHTType;
  LoadHTType AvailableLoads;

  /// \brief A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  typedef ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>
      CallHTType;
  CallHTType AvailableCalls;

  /// \brief This is the current generation of the memory value.
  unsigned CurrentGeneration;

  /// \brief Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const TargetLibraryInfo &TLI, const TargetTransformInfo &TTI,
           DominatorTree &DT, AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), MSSA(MSSA), CurrentGeneration(0) {}

  bool run();

private:

  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              CallHTType &AvailableCalls)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          CallScope(AvailableCalls) {}

  private:
    NodeScope(const NodeScope &) = delete;
    void operator=(const NodeScope &) = delete;

    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    CallHTType::ScopeTy CallScope;
  };

  // Contains all the needed information to create a stack for doing a depth
  // first traversal of the tree. This includes scopes for values, loads, and
  // calls as well as the generation. There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              CallHTType &AvailableCalls, unsigned cg, DomTreeNode *n,
              DomTreeNode::iterator child, DomTreeNode::iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end), Scopes(AvailableValues, AvailableLoads, AvailableCalls),
          Processed(false) {}

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }
    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }
    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    StackNode(const StackNode &) = delete;
    void operator=(const StackNode &) = delete;

    // Members.
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed;
  };

  /// \brief Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
        : IsTargetMemInst(false), Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
        if (TTI.getTgtMemIntrinsic(II, Info) && Info.NumMemRefs == 1)
          IsTargetMemInst = true;
    }
    bool isLoad() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }
    bool isStore() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }
    bool isAtomic() const {
      if (IsTargetMemInst) {
        assert(Info.IsSimple && "need to refine IsSimple in TTI");
        return false;
      }
      return Inst->isAtomic();
    }
    bool isUnordered() const {
      if (IsTargetMemInst) {
        assert(Info.IsSimple && "need to refine IsSimple in TTI");
        return true;
      }
      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer.
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IsTargetMemInst) {
        assert(Info.IsSimple && "need to refine IsSimple in TTI");
        return false;
      }
      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer.
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
      return false;
    }

    bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
      return (getPointerOperand() == Inst.getPointerOperand() &&
              getMatchingId() == Inst.getMatchingId());
    }
    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure. That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IsTargetMemInst) return Info.MatchingId;
      return -1;
    }
    Value *getPointerOperand() const {
      if (IsTargetMemInst) return Info.PtrVal;
      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->getPointerOperand();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->getPointerOperand();
      }
      return nullptr;
    }
    bool mayReadFromMemory() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }
    bool mayWriteToMemory() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    bool IsTargetMemInst;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };

  bool processNode(DomTreeNode *Node);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  void removeMSSA(Instruction *Inst) {
    if (!MSSA)
      return;
    // Removing a store here can leave MemorySSA in an unoptimized state by
    // creating MemoryPhis that have identical arguments and by creating
    // MemoryUses whose defining access is not an actual clobber. We handle the
    // phi case eagerly here. The non-optimized MemoryUse case is lazily
    // updated by MemorySSA getClobberingMemoryAccess.
    if (MemoryAccess *MA = MSSA->getMemoryAccess(Inst)) {
      // Optimize MemoryPhi nodes that may become redundant by having all the
      // same input values once MA is removed.
      SmallVector<MemoryPhi *, 4> PhisToCheck;
      SmallVector<MemoryAccess *, 8> WorkQueue;
      WorkQueue.push_back(MA);
      // Process MemoryPhi nodes in FIFO order using an ever-growing vector
      // since we shouldn't be processing that many phis and this will avoid
      // an allocation in almost all cases.
      for (unsigned I = 0; I < WorkQueue.size(); ++I) {
        MemoryAccess *WI = WorkQueue[I];

        for (auto *U : WI->users())
          if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U))
            PhisToCheck.push_back(MP);

        MSSA->removeMemoryAccess(WI);

        for (MemoryPhi *MP : PhisToCheck) {
          MemoryAccess *FirstIn = MP->getIncomingValue(0);
          if (all_of(MP->incoming_values(),
                     [=](Use &In) { return In == FirstIn; }))
            WorkQueue.push_back(MP);
        }
        PhisToCheck.clear();
      }
    }
  }
};
} // end anonymous namespace

/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// in both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  // Without MemorySSA, differing generations mean we cannot prove anything.
  if (!MSSA)
    return false;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  MemoryAccess *LaterDef =
      MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
  return MSSA->dominates(LaterDef, MSSA->getMemoryAccess(EarlierInst));
}

bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live out memory values are still current
  // in this block. If this block has multiple predecessors, then they could
  // have invalidated the live-out memory values of our parent value. For now,
  // just be conservative and invalidate memory if this block has multiple
  // predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path. We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value. Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    if (auto *BI = dyn_cast<BranchInst>(Pred->getTerminator()))
      if (BI->isConditional())
        if (auto *CondInst = dyn_cast<Instruction>(BI->getCondition()))
          if (SimpleValue::canHandle(CondInst)) {
            assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
            auto *ConditionalConstant = (BI->getSuccessor(0) == BB) ?
              ConstantInt::getTrue(BB->getContext()) :
              ConstantInt::getFalse(BB->getContext());
            AvailableValues.insert(CondInst, ConditionalConstant);
            DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                  << CondInst->getName() << "' as " << *ConditionalConstant
                  << " in " << BB->getName() << "\n");
            // Replace all dominated uses with the known value.
            if (unsigned Count =
                    replaceDominatedUsesWith(CondInst, ConditionalConstant, DT,
                                             BasicBlockEdge(Pred, BB))) {
              Changed = true;
              NumCSECVP = NumCSECVP + Count;
            }
          }
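
  // For example (illustrative): given a predecessor ending in
  //   %c = icmp eq i32 %x, 0
  //   br i1 %c, label %taken, label %untaken
  // the table records %c as true inside %taken and false inside %untaken, so
  // dominated uses of %c fold to a constant.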

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory. If we see a store
  /// to the same location, we delete the dead store. This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;

  const DataLayout &DL = BB->getModule()->getDataLayout();

  // See if any instructions in the block can be eliminated. If so, do it. If
  // not, add them to AvailableValues.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;

    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(Inst, &TLI)) {
      DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      removeMSSA(Inst);
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics, they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not disturb any of the assumption's control
    // dependencies.
    if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
      DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
      continue;
    }

    // Skip invariant.start intrinsics since they only read memory, and we can
    // forward values across it. Also, we don't need to consume the last store
    // since the semantics of invariant.start allow us to perform DSE of the
    // last store, if there was a store following invariant.start. Consider:
    //   store 30, i8* p
    //   invariant.start(p)
    //   store 40, i8* p
    // We can DSE the store to 30, since the store 40 to invariant location p
    // causes undefined behaviour.
    if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>()))
      continue;

    if (match(Inst, m_Intrinsic<Intrinsic::experimental_guard>())) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
        // The condition we're guarding on here is true for all dominated
        // locations.
        if (SimpleValue::canHandle(CondI))
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      }

      // Guard intrinsics read all memory, but don't write any memory.
      // Accordingly, don't update the generation but consume the last store (to
      // avoid an incorrect DSE).
      LastStore = nullptr;
      continue;
    }

    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(Inst, DL, &TLI, &DT, &AC)) {
      DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
      bool Killed = false;
      if (!Inst->use_empty()) {
        Inst->replaceAllUsesWith(V);
        Changed = true;
      }
      if (isInstructionTriviallyDead(Inst, &TLI)) {
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        Killed = true;
      }
      if (Changed)
        ++NumSimplify;
      if (Killed)
        continue;
    }

    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(Inst)) {
      // See if the instruction has an available value. If so, use it.
      if (Value *V = AvailableValues.lookup(Inst)) {
        DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V << '\n');
        if (auto *I = dyn_cast<Instruction>(V))
          I->andIRFlags(Inst);
        Inst->replaceAllUsesWith(V);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(Inst, Inst);
      continue;
    }

    ParseMemoryInst MemInst(Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // (conservatively) we can't peek past the ordering implied by this
      // operation, but we can add this load to our set of available values
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      // If we have an available version of this load, and if it is the right
      // generation or the load is known to be from an invariant location,
      // replace this instruction.
      //
      // If either the dominating load or the current load are invariant, then
      // we can assume the current load loads the same value as the dominating
      // load.
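      //
      // For example (illustrative):
      //   %v1 = load i32, i32* %p, !invariant.load !0
      //   call void @f()            ; bumps the generation
      //   %v2 = load i32, i32* %p   ; still CSE'd with %v1, since the earlier
      //                             ; load is from an invariant location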
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst != nullptr &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing loads with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          // We can't replace an atomic load with one which isn't also atomic.
          InVal.IsAtomic >= MemInst.isAtomic() &&
          (InVal.IsInvariant || MemInst.isInvariantLoad() ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
        if (Op != nullptr) {
          DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
                       << " to: " << *InVal.DefInst << '\n');
          if (!Inst->use_empty())
            Inst->replaceAllUsesWith(Op);
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(
          MemInst.getPointerOperand(),
          LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                    MemInst.isAtomic(), MemInst.isInvariantLoad()));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory, forget LastStore.
    // Load/store intrinsics will indicate both a read and a write to
    // memory. The target may override this (e.g. so that a store intrinsic
    // does not read from memory, and thus will be treated the same as a
    // regular store for commoning purposes).
    if (Inst->mayReadFromMemory() &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.
    if (CallValue::canHandle(Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              Inst)) {
        DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
                     << " to: " << *InVal.first << '\n');
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(InVal.first);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(
          Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
      continue;
    }

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence. As a
    // result, we don't need to consider it as writing to memory and don't need
    // to advance the generation. We do need to prevent DSE across the fence,
    // but that's handled above.
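    //
    // For example (illustrative), forwarding across a release fence is fine:
    //   %v1 = load i32, i32* %p
    //   fence release
    //   %v2 = load i32, i32* %p    ; may still be CSE'd with %v1 because the
    //                              ; generation was not advanced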
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }

    // Write back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write. The primary benefit is in allowing
    // the available load table to remain valid and value forward past where
    // the store originally was.
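    //
    // For example (illustrative):
    //   %v = load i32, i32* %p
    //   store i32 %v, i32* %p      ; removable: it writes back the value just
    //                              ; loaded, leaving memory unchanged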
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing stores with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          isSameMemGeneration(InVal.Generation, CurrentGeneration,
                              InVal.DefInst, Inst)) {
        // It is okay to have a LastStore to a different pointer here if
        // MemorySSA tells us that the load and store are from the same memory
        // generation. In that case, LastStore should keep its present value
        // since we're removing the current store.
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand() ||
                MSSA) &&
               "can't have an intervening store if not using MemorySSA!");
        DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }

    // Okay, this isn't something we can CSE at all. Check to see if it is
    // something that could modify memory. If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst->mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads. Delete the earlier store.
        // At the moment, we don't remove ordered stores, but do remove
        // unordered atomic stores. There's no special requirement (for
        // unordered atomics) about removing atomic stores only in favor of
        // other atomic stores since we're going to execute the non-atomic
        // one anyway and the atomic one might never have become visible.
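        //
        // For example (illustrative):
        //   store i32 1, i32* %p   ; dead: overwritten before any read
        //   store i32 2, i32* %p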
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          assert(LastStoreMemInst.isUnordered() &&
                 !LastStoreMemInst.isVolatile() &&
                 "Violated invariant");
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                         << " due to: " << *Inst << '\n');
            removeMSSA(LastStore);
            LastStore->eraseFromParent();
            Changed = true;
            ++NumDSE;
            LastStore = nullptr;
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values. Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer. It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads.insert(
            MemInst.getPointerOperand(),
            LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                      MemInst.isAtomic(), /*IsInvariant=*/false));

        // Remember that this was the last unordered store we saw for DSE. We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed. We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform. Another option would
        // be to merge the ordering with that of the post-dominating store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = Inst;
      }
    }
  }

  return Changed;
}

bool EarlyCSE::run() {
  // Note, deque is being used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableCalls, CurrentGeneration,
      DT.getRootNode(), DT.getRootNode()->begin(), DT.getRootNode()->end()));

  // Save the current generation.
  unsigned LiveOutGeneration = CurrentGeneration;

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableCalls,
                        NodeToProcess->childGeneration(), child, child->begin(),
                        child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  // Reset the current generation.
  CurrentGeneration = LiveOutGeneration;

  return Changed;
}

PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSA =
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;

  EarlyCSE CSE(TLI, TTI, DT, AC, MSSA);

  if (!CSE.run())
    return PreservedAnalyses::all();

  // CSE preserves the dominator tree because it doesn't mutate the CFG.
  // FIXME: Bundle this with other CFG-preservation.
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<GlobalsAA>();
  if (UseMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

namespace {
/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
template<bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
    if (UseMemorySSA)
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
    else
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(TLI, TTI, DT, AC, MSSA);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    if (UseMemorySSA) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};
} // end anonymous namespace

using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

template<>
char EarlyCSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)

using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template<>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)