//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/GuardUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <deque>
#include <memory>
#include <tuple>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSECVP, "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");

DEBUG_COUNTER(CSECounter, "early-cse",
              "Controls which instructions are removed");

static cl::opt<unsigned> EarlyCSEMssaOptCap(
    "earlycse-mssa-optimization-cap", cl::init(500), cl::Hidden,
    cl::desc("Enable imprecision in EarlyCSE in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

static cl::opt<bool> EarlyCSEDebugHash(
    "earlycse-debug-hash", cl::init(false), cl::Hidden,
    cl::desc("Perform extra assertion checking to verify that SimpleValue's hash "
             "function is well-behaved w.r.t. its isEqual predicate"));
//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<UnaryOperator>(Inst) ||
           isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) ||
           isa<CmpInst>(Inst) || isa<SelectInst>(Inst) ||
           isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
           isa<ShuffleVectorInst>(Inst) || isa<ExtractValueInst>(Inst) ||
           isa<InsertValueInst>(Inst) || isa<FreezeInst>(Inst);
  }
};

} // end anonymous namespace
namespace llvm {

template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};

} // end namespace llvm
/// Match a 'select' including an optional 'not' of the condition.
static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond, Value *&A,
                                           Value *&B,
                                           SelectPatternFlavor &Flavor) {
  // Return false if V is not even a select.
  if (!match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))))
    return false;

  // Look through a 'not' of the condition operand by swapping A/B.
  Value *CondNot;
  if (match(Cond, m_Not(m_Value(CondNot)))) {
    Cond = CondNot;
    std::swap(A, B);
  }

  // Match canonical forms of abs/nabs/min/max. We are not using ValueTracking's
  // more powerful matchSelectPattern() because it may rely on instruction flags
  // such as "nsw". That would be incompatible with the current hashing
  // mechanism that may remove flags to increase the likelihood of CSE.

  // These are the canonical forms of abs(X) and nabs(X) created by instcombine:
  // %N = sub i32 0, %X
  // %C = icmp slt i32 %X, 0
  // %ABS = select i1 %C, i32 %N, i32 %X
  //
  // %N = sub i32 0, %X
  // %C = icmp slt i32 %X, 0
  // %NABS = select i1 %C, i32 %X, i32 %N
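  //
  // A commuted form (illustrative example, not from the original comments)
  // that the swapped-predicate check below still recognizes as SPF_SMIN:
  // %C = icmp sgt i32 %Y, %X
  // %SMIN = select i1 %C, i32 %X, i32 %Y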
  Flavor = SPF_UNKNOWN;
  CmpInst::Predicate Pred;

  if (match(Cond, m_ICmp(Pred, m_Specific(B), m_ZeroInt())) &&
      Pred == ICmpInst::ICMP_SLT && match(A, m_Neg(m_Specific(B)))) {
    // ABS: B < 0 ? -B : B
    Flavor = SPF_ABS;
    return true;
  }

  if (match(Cond, m_ICmp(Pred, m_Specific(A), m_ZeroInt())) &&
      Pred == ICmpInst::ICMP_SLT && match(B, m_Neg(m_Specific(A)))) {
    // NABS: A < 0 ? A : -A
    Flavor = SPF_NABS;
    return true;
  }

  if (!match(Cond, m_ICmp(Pred, m_Specific(A), m_Specific(B)))) {
    // Check for commuted variants of min/max by swapping predicate.
    // If we do not match the standard or commuted patterns, this is not a
    // recognized form of min/max, but it is still a select, so return true.
    if (!match(Cond, m_ICmp(Pred, m_Specific(B), m_Specific(A))))
      return true;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  switch (Pred) {
  case CmpInst::ICMP_UGT: Flavor = SPF_UMAX; break;
  case CmpInst::ICMP_ULT: Flavor = SPF_UMIN; break;
  case CmpInst::ICMP_SGT: Flavor = SPF_SMAX; break;
  case CmpInst::ICMP_SLT: Flavor = SPF_SMIN; break;
  default: break;
  }

  return true;
}
static unsigned getHashValueImpl(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }
  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    // Compares can be commuted by swapping the comparands and
    // updating the predicate. Choose the form that has the
    // comparands in sorted order, or in the case of a tie, the
    // one with the lower predicate.
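    //
    // For example (illustrative), these two compares should hash identically,
    // since one is just the commuted form of the other:
    //   %c1 = icmp slt i32 %a, %b
    //   %c2 = icmp sgt i32 %b, %a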
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    CmpInst::Predicate SwappedPred = CI->getSwappedPredicate();
    if (std::tie(LHS, Pred) > std::tie(RHS, SwappedPred)) {
      std::swap(LHS, RHS);
      Pred = SwappedPred;
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }
  // Hash general selects to allow matching commuted true/false operands.
  SelectPatternFlavor SPF;
  Value *Cond, *A, *B;
  if (matchSelectWithOptionalNotCond(Inst, Cond, A, B, SPF)) {
    // Hash min/max/abs (cmp + select) to allow for commuted operands.
    // Min/max may also have non-canonical compare predicate (eg, the compare
    // for smin may use 'sgt' rather than 'slt'), and non-canonical operands in
    // the compare.
    // TODO: We should also detect FP min/max.
    if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
        SPF == SPF_UMIN || SPF == SPF_UMAX) {
      if (A > B)
        std::swap(A, B);
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }
    if (SPF == SPF_ABS || SPF == SPF_NABS) {
      // ABS/NABS always puts the input in A and its negation in B.
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }

    // Hash general selects to allow matching commuted true/false operands.

    // If we do not have a compare as the condition, just hash in the condition.
    CmpInst::Predicate Pred;
    Value *X, *Y;
    if (!match(Cond, m_Cmp(Pred, m_Value(X), m_Value(Y))))
      return hash_combine(Inst->getOpcode(), Cond, A, B);

    // Similar to cmp normalization (above) - canonicalize the predicate value:
    // select (icmp Pred, X, Y), A, B --> select (icmp InvPred, X, Y), B, A
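    //
    // For example (illustrative), these two selects should hash identically:
    //   %c1 = icmp ult i32 %x, %y
    //   %s1 = select i1 %c1, i32 %a, i32 %b
    //   %c2 = icmp uge i32 %x, %y
    //   %s2 = select i1 %c2, i32 %b, i32 %a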
    if (CmpInst::getInversePredicate(Pred) < Pred) {
      Pred = CmpInst::getInversePredicate(Pred);
      std::swap(A, B);
    }
    return hash_combine(Inst->getOpcode(), Pred, X, Y, A, B);
  }
  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (FreezeInst *FI = dyn_cast<FreezeInst>(Inst))
    return hash_combine(FI->getOpcode(), FI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<GetElementPtrInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst) || isa<UnaryOperator>(Inst) ||
          isa<FreezeInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}
unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
#ifndef NDEBUG
  // If -earlycse-debug-hash was specified, return a constant -- this
  // will force all hashing to collide, so we'll exhaustively search
  // the table for a match, and the assertion in isEqual will fire if
  // there's a bug causing equal keys to hash differently.
  if (EarlyCSEDebugHash)
    return 0;
#endif
  return getHashValueImpl(Val);
}
static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If we're not strictly identical, we still might be a commutable instruction
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }

  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }
  // Min/max/abs can occur with commuted operands, non-canonical predicates,
  // and/or non-canonical operands.
  // Selects can be non-trivially equivalent via inverted conditions and swaps.
  SelectPatternFlavor LSPF, RSPF;
  Value *CondL, *CondR, *LHSA, *RHSA, *LHSB, *RHSB;
  if (matchSelectWithOptionalNotCond(LHSI, CondL, LHSA, LHSB, LSPF) &&
      matchSelectWithOptionalNotCond(RHSI, CondR, RHSA, RHSB, RSPF)) {
    if (LSPF == RSPF) {
      // TODO: We should also detect FP min/max.
      if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
          LSPF == SPF_UMIN || LSPF == SPF_UMAX)
        return ((LHSA == RHSA && LHSB == RHSB) ||
                (LHSA == RHSB && LHSB == RHSA));

      if (LSPF == SPF_ABS || LSPF == SPF_NABS) {
        // Abs results are placed in a defined order by matchSelectPattern.
        return LHSA == RHSA && LHSB == RHSB;
      }

      // select Cond, A, B <--> select not(Cond), B, A
      if (CondL == CondR && LHSA == RHSA && LHSB == RHSB)
        return true;
    }

    // If the true/false operands are swapped and the conditions are compares
    // with inverted predicates, the selects are equal:
    // select (icmp Pred, X, Y), A, B <--> select (icmp InvPred, X, Y), B, A
    //
    // This also handles patterns with a double-negation in the sense of not +
    // inverse, because we looked through a 'not' in the matching function and
    // canonicalized the operands:
    // select (cmp Pred, X, Y), A, B <--> select (not (cmp InvPred, X, Y)), B, A
    //
    // This intentionally does NOT handle patterns with a double-negation in
    // the sense of not + not, because doing so could result in values
    // comparing as equal that hash differently in the min/max/abs cases like:
    // select (cmp slt, X, Y), X, Y <--> select (not (not (cmp slt, X, Y))), X, Y
    //   ^ hashes as min                  ^ would not hash as min
    // In the context of the EarlyCSE pass, however, such cases never reach
    // this code, as we simplify the double-negation before hashing the second
    // select (and so still succeed at CSEing them).
    if (LHSA == RHSB && LHSB == RHSA) {
      CmpInst::Predicate PredL, PredR;
      Value *X, *Y;
      if (match(CondL, m_Cmp(PredL, m_Value(X), m_Value(Y))) &&
          match(CondR, m_Cmp(PredR, m_Specific(X), m_Specific(Y))) &&
          CmpInst::getInversePredicate(PredL) == PredR)
        return true;
    }
  }

  return false;
}
bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  // These comparisons are nontrivial, so assert that equality implies
  // hash equality (DenseMap demands this as an invariant).
  bool Result = isEqualImpl(LHS, RHS);
  assert(!Result || (LHS.isSentinel() && LHS.Inst == RHS.Inst) ||
         getHashValueImpl(LHS) == getHashValueImpl(RHS));
  return Result;
}
//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};

} // end namespace llvm
unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;

  // gc.relocate is a 'special' call: its second and third operands are
  // not real values, but indices into the statepoint's argument list.
  // Get the values they point to.
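  //
  // For example (illustrative; argument types elided), two relocates of the
  // same base/derived pointers from the same statepoint token should hash
  // (and compare) equal even if their raw index operands differ:
  //   %r1 = call ... @llvm.experimental.gc.relocate(token %tok, i32 7, i32 7)
  //   %r2 = call ... @llvm.experimental.gc.relocate(token %tok, i32 9, i32 9)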
  if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(Inst))
    return hash_combine(GCR->getOpcode(), GCR->getOperand(0),
                        GCR->getBasePtr(), GCR->getDerivedPtr());

  // Hash all of the operands as pointers and mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}
bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  // See comment above in `getHashValue()`.
  if (const GCRelocateInst *GCR1 = dyn_cast<GCRelocateInst>(LHSI))
    if (const GCRelocateInst *GCR2 = dyn_cast<GCRelocateInst>(RHSI))
      return GCR1->getOperand(0) == GCR2->getOperand(0) &&
             GCR1->getBasePtr() == GCR2->getBasePtr() &&
             GCR1->getDerivedPtr() == GCR2->getDerivedPtr();

  return LHSI->isIdenticalTo(RHSI);
}

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//
namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  const SimplifyQuery SQ;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;

  using AllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<SimpleValue, Value *>>;
  using ScopedHTType =
      ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                      AllocatorTy>;

  /// A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;
  /// A scoped hash table of the current values of previously encountered
  /// memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores when
  /// we have a fully redundant load. In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count. The current generation count is incremented
  /// after every possibly writing memory operation, which ensures that we only
  /// CSE loads with other loads that have no intervening store. Ordering
  /// events (such as fences or atomic instructions) increment the generation
  /// count as well; essentially, we model these as writes to all possible
  /// locations. Note that atomic and/or volatile loads and stores can be
  /// present in the table; it is the responsibility of the consumer to inspect
  /// the atomicity/volatility if needed.
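  ///
  /// For example (illustrative):
  ///   %v1 = load i32, i32* %p   ; recorded at generation G
  ///   call void @f()            ; may write memory, generation becomes G+1
  ///   %v2 = load i32, i32* %p   ; generation mismatch, not CSE'd with %v1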
  struct LoadValue {
    Instruction *DefInst = nullptr;
    unsigned Generation = 0;
    int MatchingId = -1;
    bool IsAtomic = false;

    LoadValue() = default;
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic) {}
  };
  using LoadMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<Value *, LoadValue>>;
  using LoadHTType =
      ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                      LoadMapAllocator>;

  LoadHTType AvailableLoads;

  // A scoped hash table mapping memory locations (represented as typed
  // addresses) to generation numbers at which that memory location became
  // (henceforth indefinitely) invariant.
  using InvariantMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<MemoryLocation, unsigned>>;
  using InvariantHTType =
      ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
                      InvariantMapAllocator>;
  InvariantHTType AvailableInvariants;

  /// A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  using CallHTType =
      ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
  CallHTType AvailableCalls;

  /// This is the current generation of the memory value.
  unsigned CurrentGeneration = 0;
  /// Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {}

  bool run();

private:
  unsigned ClobberCounter = 0;

  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

  private:
    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
  };

  // Contains all the needed information to create a stack for doing a depth
  // first traversal of the tree. This includes scopes for values, loads, and
  // calls as well as the generation. There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              unsigned cg, DomTreeNode *n, DomTreeNode::const_iterator child,
              DomTreeNode::const_iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end),
          Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
                 AvailableCalls) {}
    StackNode(const StackNode &) = delete;
    StackNode &operator=(const StackNode &) = delete;

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::const_iterator childIter() { return ChildIter; }

    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }

    DomTreeNode::const_iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::const_iterator ChildIter;
    DomTreeNode::const_iterator EndIter;
    NodeScope Scopes;
    bool Processed = false;
  };
  /// Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
        : Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
        if (TTI.getTgtMemIntrinsic(II, Info))
          IsTargetMemInst = true;
    }

    bool isLoad() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }

    bool isStore() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }

    bool isAtomic() const {
      if (IsTargetMemInst)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }

    bool isUnordered() const {
      if (IsTargetMemInst)
        return Info.isUnordered();

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IsTargetMemInst)
        return Info.IsVolatile;

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->hasMetadata(LLVMContext::MD_invariant_load);
      return false;
    }

    bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
      return (getPointerOperand() == Inst.getPointerOperand() &&
              getMatchingId() == Inst.getMatchingId());
    }

    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure. That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IsTargetMemInst) return Info.MatchingId;
      return -1;
    }

    Value *getPointerOperand() const {
      if (IsTargetMemInst) return Info.PtrVal;
      return getLoadStorePointerOperand(Inst);
    }

    bool mayReadFromMemory() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }

    bool mayWriteToMemory() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    bool IsTargetMemInst = false;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };
  bool processNode(DomTreeNode *Node);

  bool handleBranchCondition(Instruction *CondInst, const BranchInst *BI,
                             const BasicBlock *BB, const BasicBlock *Pred);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }

  /// Return true if the instruction is known to only operate on memory
  /// provably invariant in the given "generation".
  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  void removeMSSA(Instruction &Inst) {
    if (!MSSA)
      return;
    if (VerifyMemorySSA)
      MSSA->verifyMemorySSA();
    // Removing a store here can leave MemorySSA in an unoptimized state by
    // creating MemoryPhis that have identical arguments and by creating
    // MemoryUses whose defining access is not an actual clobber. The phi case
    // is handled by MemorySSA when passing OptimizePhis = true to
    // removeMemoryAccess. The non-optimized MemoryUse case is lazily updated
    // by MemorySSA's getClobberingMemoryAccess.
    MSSAUpdater->removeMemoryAccess(&Inst, true);
  }
};

} // end anonymous namespace
/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// in both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  if (!MSSA)
    return false;

  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
  // read/write memory, then we can safely return true here.
  // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
  // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
  // by also checking the MemorySSA MemoryAccess on the instruction. Initial
  // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
  // with the default optimization pipeline.
  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
  if (!EarlierMA)
    return true;
  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
  if (!LaterMA)
    return true;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  MemoryAccess *LaterDef;
  if (ClobberCounter < EarlyCSEMssaOptCap) {
    LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
    ClobberCounter++;
  } else
    LaterDef = LaterMA->getDefiningAccess();

  return MSSA->dominates(LaterDef, EarlierMA);
}
bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // A location loaded from with an invariant_load is assumed to *never* change
  // within the visible scope of the compilation.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

  auto MemLocOpt = MemoryLocation::getOrNone(I);
  if (!MemLocOpt)
    // "target" intrinsic forms of loads aren't currently known to
    // MemoryLocation::get. TODO
    return false;
  MemoryLocation MemLoc = *MemLocOpt;
  if (!AvailableInvariants.count(MemLoc))
    return false;

  // Is the generation at which this became invariant older than the
  // current one?
  return AvailableInvariants.lookup(MemLoc) <= GenAt;
}
bool EarlyCSE::handleBranchCondition(Instruction *CondInst,
                                     const BranchInst *BI, const BasicBlock *BB,
                                     const BasicBlock *Pred) {
  assert(BI->isConditional() && "Should be a conditional branch!");
  assert(BI->getCondition() == CondInst && "Wrong condition?");
  assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
  auto *TorF = (BI->getSuccessor(0) == BB)
                   ? ConstantInt::getTrue(BB->getContext())
                   : ConstantInt::getFalse(BB->getContext());
  auto MatchBinOp = [](Instruction *I, unsigned Opcode) {
    if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(I))
      return BOp->getOpcode() == Opcode;
    return false;
  };
  // If the condition is an AND operation, we can propagate its operands into
  // the true branch. If it is an OR operation, we can propagate them into the
  // false branch.
  unsigned PropagateOpcode =
      (BI->getSuccessor(0) == BB) ? Instruction::And : Instruction::Or;
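  //
  // For example (illustrative), given:
  //   %a = icmp eq i32 %x, 0
  //   %b = icmp eq i32 %y, 0
  //   %c = and i1 %a, %b
  //   br i1 %c, label %bb, label %other
  // on the edge into %bb we can record %c, %a, and %b as true and replace
  // their dominated uses.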
  bool MadeChanges = false;
  SmallVector<Instruction *, 4> WorkList;
  SmallPtrSet<Instruction *, 4> Visited;
  WorkList.push_back(CondInst);
  while (!WorkList.empty()) {
    Instruction *Curr = WorkList.pop_back_val();

    AvailableValues.insert(Curr, TorF);
    LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                      << Curr->getName() << "' as " << *TorF << " in "
                      << BB->getName() << "\n");
    if (!DebugCounter::shouldExecute(CSECounter)) {
      LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
    } else {
      // Replace all dominated uses with the known value.
      if (unsigned Count = replaceDominatedUsesWith(Curr, TorF, DT,
                                                    BasicBlockEdge(Pred, BB))) {
        NumCSECVP += Count;
        MadeChanges = true;
      }
    }

    if (MatchBinOp(Curr, PropagateOpcode))
      for (auto &Op : cast<BinaryOperator>(Curr)->operands())
        if (Instruction *OPI = dyn_cast<Instruction>(Op))
          if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second)
            WorkList.push_back(OPI);
  }

  return MadeChanges;
}
bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live out memory values are still current
  // in this block. If this block has multiple predecessors, then they could
  // have invalidated the live-out memory values of our parent value. For now,
  // just be conservative and invalidate memory if this block has multiple
  // predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path. We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value. Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
    if (BI && BI->isConditional()) {
      auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
      if (CondInst && SimpleValue::canHandle(CondInst))
        Changed |= handleBranchCondition(CondInst, BI, BB, Pred);
    }
  }

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory. If we see a store
  /// to the same location, we delete the dead store. This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;

  // See if any instructions in the block can be eliminated. If so, do it. If
  // not, add them to AvailableValues.
  for (Instruction &Inst : make_early_inc_range(BB->getInstList())) {
    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(&Inst, &TLI)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << Inst << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
        continue;
      }

      salvageKnowledge(&Inst, &AC);
      salvageDebugInfo(Inst);
      removeMSSA(Inst);
      Inst.eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics, they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not bother with its removal. However, we should mark
    // its condition as true for all dominated blocks.
    if (match(&Inst, m_Intrinsic<Intrinsic::assume>())) {
      auto *CondI =
          dyn_cast<Instruction>(cast<CallInst>(Inst).getArgOperand(0));
      if (CondI && SimpleValue::canHandle(CondI)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << Inst
                          << '\n');
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      } else
        LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << Inst << '\n');
      continue;
    }

    // Skip sideeffect intrinsics, for the same reason as assume intrinsics.
    if (match(&Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << Inst << '\n');
      continue;
    }

    // We can skip all invariant.start intrinsics since they only read memory,
    // and we can forward values across it. For invariant starts without
    // invariant ends, we can use the fact that the invariantness never ends to
    // start a scope in the current generation which is true for all future
    // generations. Also, we don't need to consume the last store since the
    // semantics of invariant.start allow us to perform DSE of the last
    // store, if there was a store following invariant.start. Consider:
    //
    //   store 30, i8* p
    //   invariant.start(p)
    //   store 40, i8* p
    //
    // We can DSE the store to 30, since the store 40 to invariant location p
    // causes undefined behaviour.
    if (match(&Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
      // If there are any uses, the scope might end.
      if (!Inst.use_empty())
        continue;
      MemoryLocation MemLoc =
          MemoryLocation::getForArgument(&cast<CallInst>(Inst), 1, TLI);
      // Don't start a scope if we already have a better one pushed
      if (!AvailableInvariants.count(MemLoc))
        AvailableInvariants.insert(MemLoc, CurrentGeneration);
      continue;
    }

    if (isGuard(&Inst)) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst).getArgOperand(0))) {
        if (SimpleValue::canHandle(CondI)) {
          // Do we already know the actual value of this condition?
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
            // Is the condition known to be true?
            if (isa<ConstantInt>(KnownCond) &&
                cast<ConstantInt>(KnownCond)->isOne()) {
              LLVM_DEBUG(dbgs()
                         << "EarlyCSE removing guard: " << Inst << '\n');
              salvageKnowledge(&Inst, &AC);
              removeMSSA(Inst);
              Inst.eraseFromParent();
              Changed = true;
              continue;
            } else
              // Use the known value if it wasn't true.
              cast<CallInst>(Inst).setArgOperand(0, KnownCond);
          }
          // The condition we're on guarding here is true for all dominated
          // blocks.
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
        }
      }

      // Guard intrinsics read all memory, but don't write any memory.
      // Accordingly, don't update the generation but consume the last store (to
      // avoid an incorrect DSE).
      LastStore = nullptr;
      continue;
    }
    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(&Inst, SQ)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << Inst << "  to: " << *V
                        << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
      } else {
        bool Killed = false;
        if (!Inst.use_empty()) {
          Inst.replaceAllUsesWith(V);
          Changed = true;
        }
        if (isInstructionTriviallyDead(&Inst, &TLI)) {
          salvageKnowledge(&Inst, &AC);
          removeMSSA(Inst);
          Inst.eraseFromParent();
          Changed = true;
          Killed = true;
        }
        if (Changed)
          ++NumSimplify;
        if (Killed)
          continue;
      }
    }
1085 if (SimpleValue::canHandle(&Inst)) {
1086 // See if the instruction has an available value. If so, use it.
1087 if (Value *V = AvailableValues.lookup(&Inst)) {
1088 LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << Inst << " to: " << *V
1090 if (!DebugCounter::shouldExecute(CSECounter)) {
1091 LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1094 if (auto *I = dyn_cast<Instruction>(V))
1095 I->andIRFlags(&Inst);
1096 Inst.replaceAllUsesWith(V);
1097 salvageKnowledge(&Inst, &AC);
1099 Inst.eraseFromParent();
1105 // Otherwise, just remember that this value is available.
1106 AvailableValues.insert(&Inst, &Inst);
    ParseMemoryInst MemInst(&Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // (conservatively) we can't peek past the ordering implied by this
      // operation, but we can add this load to our set of available values
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      if (MemInst.isInvariantLoad()) {
        // If we pass an invariant load, we know that memory location is
        // indefinitely constant from the moment of first dereferenceability.
        // We conservatively treat the invariant_load as that moment. If we
        // pass an invariant load after already establishing a scope, don't
        // restart it since we want to preserve the earliest point seen.
        auto MemLoc = MemoryLocation::get(&Inst);
        if (!AvailableInvariants.count(MemLoc))
          AvailableInvariants.insert(MemLoc, CurrentGeneration);
      }

      // If we have an available version of this load, and if it is the right
      // generation or the load is known to be from an invariant location,
      // replace this instruction.
      //
      // If either the dominating load or the current load are invariant, then
      // we can assume the current load loads the same value as the dominating
      // load.
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst != nullptr &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing loads with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          // We can't replace an atomic load with one which isn't also atomic.
          InVal.IsAtomic >= MemInst.isAtomic() &&
          (isOperatingOnInvariantMemAt(&Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, &Inst))) {
        Value *Op = getOrCreateResult(InVal.DefInst, Inst.getType());
        if (Op != nullptr) {
          LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << Inst
                            << "  to: " << *InVal.DefInst << '\n');
          if (!DebugCounter::shouldExecute(CSECounter)) {
            LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            continue;
          }
          if (!Inst.use_empty())
            Inst.replaceAllUsesWith(Op);
          salvageKnowledge(&Inst, &AC);
          removeMSSA(Inst);
          Inst.eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(MemInst.getPointerOperand(),
                            LoadValue(&Inst, CurrentGeneration,
                                      MemInst.getMatchingId(),
                                      MemInst.isAtomic()));
      LastStore = nullptr;
      continue;
    }
    // If this instruction may read from memory or throw (and potentially read
    // from memory in the exception handler), forget LastStore. Load/store
    // intrinsics will indicate both a read and a write to memory. The target
    // may override this (e.g. so that a store intrinsic does not read from
    // memory, and thus will be treated the same as a regular store for
    // commoning purposes).
    if ((Inst.mayReadFromMemory() || Inst.mayThrow()) &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.
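    // For example (illustrative), two calls to a readonly function with no
    // intervening instruction that may write memory can be commoned:
    //   %r1 = call i32 @f(i32 %x)   ; @f only reads memory
    //   %r2 = call i32 @f(i32 %x)   ; replaced by %r1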
    if (CallValue::canHandle(&Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(&Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              &Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << Inst
                          << "  to: " << *InVal.first << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (!Inst.use_empty())
          Inst.replaceAllUsesWith(InVal.first);
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(&Inst, std::make_pair(&Inst, CurrentGeneration));
      continue;
    }
    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence. As a
    // result, we don't need to consider it as writing to memory and don't need
    // to advance the generation. We do need to prevent DSE across the fence,
    // but that's handled above.
    if (auto *FI = dyn_cast<FenceInst>(&Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst.mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }
    // write back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write. The primary benefit is in allowing
    // the available load table to remain valid and value forward past where
    // the store originally was.
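    //
    // For example (illustrative):
    //   %v = load i32, i32* %p
    //   store i32 %v, i32* %p   ; removable: it stores back the loaded value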
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst == getOrCreateResult(&Inst, InVal.DefInst->getType()) &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing stores with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          (isOperatingOnInvariantMemAt(&Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, &Inst))) {
        // It is okay to have a LastStore to a different pointer here if MemorySSA
        // tells us that the load and store are from the same memory generation.
        // In that case, LastStore should keep its present value since we're
        // removing the current store.
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand() ||
                MSSA) &&
               "can't have an intervening store if not using MemorySSA!");
        LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << Inst << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }
    // Okay, this isn't something we can CSE at all. Check to see if it is
    // something that could modify memory. If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst.mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads. Delete the earlier store.
        // At the moment, we don't remove ordered stores, but do remove
        // unordered atomic stores. There's no special requirement (for
        // unordered atomics) about removing atomic stores only in favor of
        // other atomic stores since we were going to execute the non-atomic
        // one anyway and the atomic one might never have become visible.
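        //
        // For example (illustrative):
        //   store i32 1, i32* %p
        //   store i32 2, i32* %p   ; makes the first store trivially dead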
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          assert(LastStoreMemInst.isUnordered() &&
                 !LastStoreMemInst.isVolatile() &&
                 "Violated invariant");
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                              << "  due to: " << Inst << '\n');
            if (!DebugCounter::shouldExecute(CSECounter)) {
              LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            } else {
              salvageKnowledge(&Inst, &AC);
              removeMSSA(*LastStore);
              LastStore->eraseFromParent();
              Changed = true;
              ++NumDSE;
              LastStore = nullptr;
            }
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values. Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer. It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads.insert(MemInst.getPointerOperand(),
                              LoadValue(&Inst, CurrentGeneration,
                                        MemInst.getMatchingId(),
                                        MemInst.isAtomic()));

        // Remember that this was the last unordered store we saw for DSE. We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed. We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform. Another option would
        // be to merge the ordering with that of the post dominating store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = &Inst;
        else
          LastStore = nullptr;
      }
    }
  }

  return Changed;
}
bool EarlyCSE::run() {
  // Note, deque is being used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
      CurrentGeneration, DT.getRootNode(),
      DT.getRootNode()->begin(), DT.getRootNode()->end()));

  assert(!CurrentGeneration && "Create a new EarlyCSE instance to rerun it.");
  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
                        AvailableCalls, NodeToProcess->childGeneration(),
                        child, child->begin(), child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  return Changed;
}
PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSA =
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;

  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

  if (!CSE.run())
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (UseMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}
namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
template<bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
    if (UseMemorySSA)
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
    else
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    if (UseMemorySSA) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace
using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

template<>
char EarlyCSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)

using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template<>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)