1 //===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements a trivial dead store elimination that only considers
10 // basic-block local redundant stores.
12 // FIXME: This should eventually be extended to be a post-dominator tree
13 // traversal. Doing so would be pretty trivial.
15 //===----------------------------------------------------------------------===//
17 #include "llvm/Transforms/Scalar/DeadStoreElimination.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/DenseMap.h"
20 #include "llvm/ADT/MapVector.h"
21 #include "llvm/ADT/PostOrderIterator.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/CaptureTracking.h"
29 #include "llvm/Analysis/GlobalsModRef.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
32 #include "llvm/Analysis/MemoryLocation.h"
33 #include "llvm/Analysis/MemorySSA.h"
34 #include "llvm/Analysis/MemorySSAUpdater.h"
35 #include "llvm/Analysis/PostDominators.h"
36 #include "llvm/Analysis/TargetLibraryInfo.h"
37 #include "llvm/Analysis/ValueTracking.h"
38 #include "llvm/IR/Argument.h"
39 #include "llvm/IR/BasicBlock.h"
40 #include "llvm/IR/Constant.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DataLayout.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/InstIterator.h"
46 #include "llvm/IR/InstrTypes.h"
47 #include "llvm/IR/Instruction.h"
48 #include "llvm/IR/Instructions.h"
49 #include "llvm/IR/IntrinsicInst.h"
50 #include "llvm/IR/Intrinsics.h"
51 #include "llvm/IR/LLVMContext.h"
52 #include "llvm/IR/Module.h"
53 #include "llvm/IR/PassManager.h"
54 #include "llvm/IR/PatternMatch.h"
55 #include "llvm/IR/Value.h"
56 #include "llvm/InitializePasses.h"
57 #include "llvm/Pass.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CommandLine.h"
60 #include "llvm/Support/Debug.h"
61 #include "llvm/Support/DebugCounter.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/MathExtras.h"
64 #include "llvm/Support/raw_ostream.h"
65 #include "llvm/Transforms/Scalar.h"
66 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
67 #include "llvm/Transforms/Utils/Local.h"
77 using namespace PatternMatch;
79 #define DEBUG_TYPE "dse"
81 STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
82 STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
83 STATISTIC(NumFastStores, "Number of stores deleted");
84 STATISTIC(NumFastOther, "Number of other instrs removed");
85 STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
86 STATISTIC(NumModifiedStores, "Number of stores modified");
87 STATISTIC(NumNoopStores, "Number of noop stores deleted");
STATISTIC(NumCFGChecks, "Number of stores checked across the CFG");
STATISTIC(NumCFGTries, "Number of cross-CFG elimination attempts");
STATISTIC(NumCFGSuccess, "Number of successful cross-CFG eliminations");
92 DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
93 "Controls which MemoryDefs are eliminated.");
96 EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
97 cl::init(true), cl::Hidden,
98 cl::desc("Enable partial-overwrite tracking in DSE"));
101 EnablePartialStoreMerging("enable-dse-partial-store-merging",
102 cl::init(true), cl::Hidden,
103 cl::desc("Enable partial store merging in DSE"));
106 EnableMemorySSA("enable-dse-memoryssa", cl::init(false), cl::Hidden,
107 cl::desc("Use the new MemorySSA-backed DSE."));
109 static cl::opt<unsigned>
110 MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(100), cl::Hidden,
111 cl::desc("The number of memory instructions to scan for "
112 "dead store elimination (default = 100)"));
114 static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
115 "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
cl::desc("The number of MemoryDefs we consider as candidates to eliminate "
117 "other stores per basic block (default = 5000)"));
119 static cl::opt<unsigned> MemorySSAPathCheckLimit(
120 "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
121 cl::desc("The maximum number of blocks to check when trying to prove that "
122 "all paths to an exit go through a killing block (default = 50)"));
124 //===----------------------------------------------------------------------===//
126 //===----------------------------------------------------------------------===//
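// For each earlier write, the corresponding interval map records the byte
// intervals of that write which are overwritten by later stores; the key of
// each entry is the (half-open) end offset of an interval and the value is
// its start offset.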
127 using OverlapIntervalsTy = std::map<int64_t, int64_t>;
128 using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
130 /// Delete this instruction. Before we do, go through and zero out all the
/// operands of this instruction. If any of them become dead, delete them and
/// the computation tree that feeds them.
133 /// If ValueSet is non-null, remove any deleted instructions from it as well.
135 deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
136 MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
137 InstOverlapIntervalsTy &IOL,
138 MapVector<Instruction *, bool> &ThrowableInst,
139 SmallSetVector<const Value *, 16> *ValueSet = nullptr) {
140 SmallVector<Instruction*, 32> NowDeadInsts;
142 NowDeadInsts.push_back(I);
145 // Keeping the iterator straight is a pain, so we let this routine tell the
// caller what the next instruction is after we're done mucking about.
147 BasicBlock::iterator NewIter = *BBI;
149 // Before we touch this instruction, remove it from memdep!
151 Instruction *DeadInst = NowDeadInsts.pop_back_val();
152 // Mark the DeadInst as dead in the list of throwable instructions.
153 auto It = ThrowableInst.find(DeadInst);
154 if (It != ThrowableInst.end())
155 ThrowableInst[It->first] = false;
158 // Try to preserve debug information attached to the dead instruction.
159 salvageDebugInfo(*DeadInst);
160 salvageKnowledge(DeadInst);
162 // This instruction is dead, zap it, in stages. Start by removing it from
// MemDep, which needs to know the operands and needs it to be in the
// function.
165 MD.removeInstruction(DeadInst);
167 for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
168 Value *Op = DeadInst->getOperand(op);
169 DeadInst->setOperand(op, nullptr);
171 // If this operand just became dead, add it to the NowDeadInsts list.
172 if (!Op->use_empty()) continue;
174 if (Instruction *OpI = dyn_cast<Instruction>(Op))
175 if (isInstructionTriviallyDead(OpI, &TLI))
176 NowDeadInsts.push_back(OpI);
179 if (ValueSet) ValueSet->remove(DeadInst);
182 if (NewIter == DeadInst->getIterator())
183 NewIter = DeadInst->eraseFromParent();
185 DeadInst->eraseFromParent();
186 } while (!NowDeadInsts.empty());
// Pop dead entries from the back of ThrowableInst until we find a live entry.
189 while (!ThrowableInst.empty() && !ThrowableInst.back().second)
190 ThrowableInst.pop_back();
193 /// Does this instruction write some memory? This only returns true for things
/// that we can analyze with other helpers below.
195 static bool hasAnalyzableMemoryWrite(Instruction *I,
196 const TargetLibraryInfo &TLI) {
197 if (isa<StoreInst>(I))
199 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
200 switch (II->getIntrinsicID()) {
203 case Intrinsic::memset:
204 case Intrinsic::memmove:
205 case Intrinsic::memcpy:
206 case Intrinsic::memcpy_element_unordered_atomic:
207 case Intrinsic::memmove_element_unordered_atomic:
208 case Intrinsic::memset_element_unordered_atomic:
209 case Intrinsic::init_trampoline:
210 case Intrinsic::lifetime_end:
214 if (auto *CB = dyn_cast<CallBase>(I)) {
216 if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
219 case LibFunc_strncpy:
221 case LibFunc_strncat:
231 /// Return a Location stored to by the specified instruction. If isRemovable
/// returns true, this function and getLocForRead completely describe the memory
/// operations for this instruction.
234 static MemoryLocation getLocForWrite(Instruction *Inst) {
236 if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
237 return MemoryLocation::get(SI);
239 if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
240 // memcpy/memmove/memset.
241 MemoryLocation Loc = MemoryLocation::getForDest(MI);
245 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
246 switch (II->getIntrinsicID()) {
248 return MemoryLocation(); // Unhandled intrinsic.
249 case Intrinsic::init_trampoline:
250 return MemoryLocation(II->getArgOperand(0));
251 case Intrinsic::lifetime_end: {
252 uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
253 return MemoryLocation(II->getArgOperand(1), Len);
257 if (auto *CB = dyn_cast<CallBase>(Inst))
// All the supported TLI functions so far happen to have dest as their
// first argument.
260 return MemoryLocation(CB->getArgOperand(0));
261 return MemoryLocation();
264 /// Return the location read by the specified "hasAnalyzableMemoryWrite"
/// instruction if any.
266 static MemoryLocation getLocForRead(Instruction *Inst,
267 const TargetLibraryInfo &TLI) {
268 assert(hasAnalyzableMemoryWrite(Inst, TLI) && "Unknown instruction case");
270 // The only instructions that both read and write are the mem transfer
// instructions (memcpy/memmove).
272 if (auto *MTI = dyn_cast<AnyMemTransferInst>(Inst))
273 return MemoryLocation::getForSource(MTI);
274 return MemoryLocation();
277 /// If the value of this instruction and the memory it writes to is unused, may
/// we delete this instruction?
279 static bool isRemovable(Instruction *I) {
280 // Don't remove volatile/atomic stores.
281 if (StoreInst *SI = dyn_cast<StoreInst>(I))
282 return SI->isUnordered();
284 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
285 switch (II->getIntrinsicID()) {
286 default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
287 case Intrinsic::lifetime_end:
// Never remove dead lifetime_end's, e.g. because it is followed by a
// free.
291 case Intrinsic::init_trampoline:
292 // Always safe to remove init_trampoline.
294 case Intrinsic::memset:
295 case Intrinsic::memmove:
296 case Intrinsic::memcpy:
297 // Don't remove volatile memory intrinsics.
298 return !cast<MemIntrinsic>(II)->isVolatile();
299 case Intrinsic::memcpy_element_unordered_atomic:
300 case Intrinsic::memmove_element_unordered_atomic:
301 case Intrinsic::memset_element_unordered_atomic:
306 // note: only get here for calls with analyzable writes - i.e. libcalls
307 if (auto *CB = dyn_cast<CallBase>(I))
308 return CB->use_empty();
/// Returns true if the end of this instruction can be safely shortened in
/// length.
315 static bool isShortenableAtTheEnd(Instruction *I) {
316 // Don't shorten stores for now
317 if (isa<StoreInst>(I))
320 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
321 switch (II->getIntrinsicID()) {
322 default: return false;
323 case Intrinsic::memset:
324 case Intrinsic::memcpy:
325 case Intrinsic::memcpy_element_unordered_atomic:
326 case Intrinsic::memset_element_unordered_atomic:
327 // Do shorten memory intrinsics.
328 // FIXME: Add memmove if it's also safe to transform.
// Don't shorten library calls for now.
/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
340 static bool isShortenableAtTheBeginning(Instruction *I) {
341 // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
// easily done by offsetting the source address.
343 return isa<AnyMemSetInst>(I);
346 /// Return the pointer that is being written to.
347 static Value *getStoredPointerOperand(Instruction *I) {
348 //TODO: factor this to reuse getLocForWrite
349 MemoryLocation Loc = getLocForWrite(I);
351 "unable to find pointer written for analyzable instruction?");
352 // TODO: most APIs don't expect const Value *
353 return const_cast<Value*>(Loc.Ptr);
356 static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
357 const TargetLibraryInfo &TLI,
361 Opts.NullIsUnknownSize = NullPointerIsDefined(F);
363 if (getObjectSize(V, Size, DL, &TLI, Opts))
365 return MemoryLocation::UnknownSize;
370 enum OverwriteResult {
374 OW_PartialEarlierWithFullLater,
378 } // end anonymous namespace
380 /// Return 'OW_Complete' if a store to the 'Later' location completely
/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
/// beginning of the 'Earlier' location is overwritten by 'Later'.
384 /// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
/// overwritten by a later (smaller) store which doesn't write outside the big
/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
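///
/// For example (illustrative), if %p and %q are must-aliased pointers:
///   store i16 0, i16* %p      ; Earlier
///   store i32 1, i32* %q      ; Later, covers all bytes of the earlier store
/// the earlier store is classified as OW_Complete.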
387 static OverwriteResult isOverwrite(const MemoryLocation &Later,
388 const MemoryLocation &Earlier,
389 const DataLayout &DL,
390 const TargetLibraryInfo &TLI,
391 int64_t &EarlierOff, int64_t &LaterOff,
392 Instruction *DepWrite,
393 InstOverlapIntervalsTy &IOL,
396 // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
// get imprecise values here, though (except for unknown sizes).
398 if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise())
401 const uint64_t LaterSize = Later.Size.getValue();
402 const uint64_t EarlierSize = Earlier.Size.getValue();
404 const Value *P1 = Earlier.Ptr->stripPointerCasts();
405 const Value *P2 = Later.Ptr->stripPointerCasts();
407 // If the start pointers are the same, we just have to compare sizes to see if
// the later store was larger than the earlier store.
409 if (P1 == P2 || AA.isMustAlias(P1, P2)) {
410 // Make sure that the Later size is >= the Earlier size.
411 if (LaterSize >= EarlierSize)
415 // Check to see if the later store is to the entire object (either a global,
// an alloca, or a byval/inalloca argument). If so, then it clearly
// overwrites any other store to the same object.
418 const Value *UO1 = GetUnderlyingObject(P1, DL),
419 *UO2 = GetUnderlyingObject(P2, DL);
421 // If we can't resolve the same pointers to the same object, then we can't
// analyze them at all.
426 // If the "Later" store is to a recognizable object, get its size.
427 uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, F);
428 if (ObjectSize != MemoryLocation::UnknownSize)
429 if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
432 // Okay, we have stores to two completely different pointers. Try to
// decompose the pointer into a "base + constant_offset" form. If the base
// pointers are equal, then we can reason about the two stores.
437 const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
438 const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
440 // If the base pointers still differ, we have two completely different stores.
444 // The later store completely overlaps the earlier store if:
446 // 1. Both start at the same offset and the later one's size is greater than
//    or equal to the earlier one's, or
452 // 2. The earlier store has an offset greater than the later offset, but which
//    still lies completely within the later store.
//          |--earlier--|
//      |----- later ------|
458 // We have to be careful here as *Off is signed while *.Size is unsigned.
459 if (EarlierOff >= LaterOff &&
460 LaterSize >= EarlierSize &&
461 uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
464 // We may now overlap, although the overlap is not complete. There might also
// be other incomplete overlaps, and together, they might cover the complete
// earlier write.
467 // Note: The correctness of this logic depends on the fact that this function
// is never called with DepWrite when there are any intervening reads.
469 if (EnablePartialOverwriteTracking &&
470 LaterOff < int64_t(EarlierOff + EarlierSize) &&
471 int64_t(LaterOff + LaterSize) >= EarlierOff) {
473 // Insert our part of the overlap into the map.
474 auto &IM = IOL[DepWrite];
475 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
476 << ", " << int64_t(EarlierOff + EarlierSize)
477 << ") Later [" << LaterOff << ", "
478 << int64_t(LaterOff + LaterSize) << ")\n");
480 // Make sure that we only insert non-overlapping intervals and combine
// adjacent intervals. The intervals are stored in the map with the ending
// offset as the key (in the half-open sense) and the starting offset as
// the value.
484 int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;
486 // Find any intervals ending at, or after, LaterIntStart which start
// before LaterIntEnd.
488 auto ILI = IM.lower_bound(LaterIntStart);
489 if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
490 // This existing interval is overlapped with the current store somewhere
// in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
// intervals and adjusting our start and end.
493 LaterIntStart = std::min(LaterIntStart, ILI->second);
494 LaterIntEnd = std::max(LaterIntEnd, ILI->first);
497 // Continue erasing and adjusting our end in case other previous
// intervals are also overlapped with the current store.
//     |--- earlier 1 ---|  |--- earlier 2 ---|
501 // |------- later---------|
503 while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
504 assert(ILI->second > LaterIntStart && "Unexpected interval");
505 LaterIntEnd = std::max(LaterIntEnd, ILI->first);
510 IM[LaterIntEnd] = LaterIntStart;
513 if (ILI->second <= EarlierOff &&
514 ILI->first >= int64_t(EarlierOff + EarlierSize)) {
515 LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
516 << EarlierOff << ", "
517 << int64_t(EarlierOff + EarlierSize)
518 << ") Composite Later [" << ILI->second << ", "
519 << ILI->first << ")\n");
520 ++NumCompletePartials;
525 // Check for an earlier store which writes to all the memory locations that
// the later store writes to.
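//
//     |--------- earlier ---------|
//          |---- later ----|
//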
527 if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
528 int64_t(EarlierOff + EarlierSize) > LaterOff &&
529 uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
530 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["
531 << EarlierOff << ", "
532 << int64_t(EarlierOff + EarlierSize)
533 << ") by a later store [" << LaterOff << ", "
534 << int64_t(LaterOff + LaterSize) << ")\n");
535 // TODO: Maybe come up with a better name?
536 return OW_PartialEarlierWithFullLater;
// Another interesting case is if the later store overwrites the end of the
// earlier store.
545 // In this case we may want to trim the size of earlier to avoid generating
// writes to addresses which will definitely be overwritten later.
547 if (!EnablePartialOverwriteTracking &&
548 (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
549 int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
552 // Finally, we also need to check if the later store overwrites the beginning
// of the earlier store.
558 // In this case we may want to move the destination address and trim the size
// of earlier to avoid generating writes to addresses which will definitely
// be overwritten later.
561 if (!EnablePartialOverwriteTracking &&
562 (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
563 assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
564 "Expect to be handled as OW_Complete");
567 // Otherwise, they don't completely overlap.
571 /// If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense. Consider this case:
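///
///   memcpy(A <- B)
///   memcpy(A <- A)
///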
578 /// In this case, the second store to A does not make the first store to A dead.
579 /// The usual situation isn't an explicit A<-A store like this (which can be
580 /// trivially removed) but a case where two pointers may alias.
582 /// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
584 static bool isPossibleSelfRead(Instruction *Inst,
585 const MemoryLocation &InstStoreLoc,
586 Instruction *DepWrite,
587 const TargetLibraryInfo &TLI,
// Self reads can only happen for instructions that read memory. Get the
// location read.
591 MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
592 if (!InstReadLoc.Ptr)
593 return false; // Not a reading instruction.
595 // If the read and written loc obviously don't alias, it isn't a read.
596 if (AA.isNoAlias(InstReadLoc, InstStoreLoc))
599 if (isa<AnyMemCpyInst>(Inst)) {
600 // LLVM's memcpy overlap semantics are not fully fleshed out (see PR11763)
601 // but in practice memcpy(A <- B) either means that A and B are disjoint or
// are equal (i.e. there are not partial overlaps). Given that, if we have:
604 // memcpy/memmove(A <- B) // DepWrite
605 // memcpy(A <- B) // Inst
// with Inst reading/writing a >= size than DepWrite, we can reason as
// follows:
// - If A == B then both the copies are no-ops, so the DepWrite can be
//   removed.
612 // - If A != B then A and B are disjoint locations in Inst. Since
//   Inst.size >= DepWrite.size A and B are disjoint in DepWrite too.
//   Therefore DepWrite can be removed.
615 MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);
617 if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
621 // If DepWrite doesn't read memory or if we can't prove it is a must alias,
// then it can't be considered dead.
626 /// Returns true if the memory which is accessed by the second instruction is not
/// modified between the first and the second instruction.
/// Precondition: Second instruction must be dominated by the first
/// instruction.
630 static bool memoryIsNotModifiedBetween(Instruction *FirstI,
631 Instruction *SecondI,
633 const DataLayout &DL,
635 // Do a backwards scan through the CFG from SecondI to FirstI. Look for
// instructions which can modify the memory location accessed by SecondI.
638 // While doing the walk keep track of the address to check. It might be
// different in different basic blocks due to PHI translation.
640 using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
641 SmallVector<BlockAddressPair, 16> WorkList;
642 // Keep track of the address we visited each block with. Bail out if we
// visit a block with different addresses.
644 DenseMap<BasicBlock *, Value *> Visited;
646 BasicBlock::iterator FirstBBI(FirstI);
648 BasicBlock::iterator SecondBBI(SecondI);
649 BasicBlock *FirstBB = FirstI->getParent();
650 BasicBlock *SecondBB = SecondI->getParent();
651 MemoryLocation MemLoc = MemoryLocation::get(SecondI);
652 auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
654 // Start checking the SecondBB.
656 std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
657 bool isFirstBlock = true;
659 // Check all blocks going backward until we reach the FirstBB.
660 while (!WorkList.empty()) {
661 BlockAddressPair Current = WorkList.pop_back_val();
662 BasicBlock *B = Current.first;
663 PHITransAddr &Addr = Current.second;
664 Value *Ptr = Addr.getAddr();
666 // Ignore instructions before FirstI if this is the FirstBB.
667 BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
669 BasicBlock::iterator EI;
671 // Ignore instructions after SecondI if this is the first visit of SecondBB.
672 assert(B == SecondBB && "first block is not the store block");
674 isFirstBlock = false;
676 // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
// In this case we also have to look at instructions after SecondI.
680 for (; BI != EI; ++BI) {
681 Instruction *I = &*BI;
682 if (I->mayWriteToMemory() && I != SecondI)
683 if (isModSet(AA->getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
687 assert(B != &FirstBB->getParent()->getEntryBlock() &&
688 "Should not hit the entry block because SI must be dominated by LI");
689 for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
690 PHITransAddr PredAddr = Addr;
691 if (PredAddr.NeedsPHITranslationFromBlock(B)) {
692 if (!PredAddr.IsPotentiallyPHITranslatable())
694 if (PredAddr.PHITranslateValue(B, *PredI, DT, false))
697 Value *TranslatedPtr = PredAddr.getAddr();
698 auto Inserted = Visited.insert(std::make_pair(*PredI, TranslatedPtr));
699 if (!Inserted.second) {
700 // We already visited this block before. If it was with a different
// address - bail out!
702 if (TranslatedPtr != Inserted.first->second)
704 // ... otherwise just skip it.
707 WorkList.push_back(std::make_pair(*PredI, PredAddr));
/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
716 static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
717 BasicBlock *BB, DominatorTree *DT) {
718 for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
719 BasicBlock *Pred = *I;
720 if (Pred == BB) continue;
721 Instruction *PredTI = Pred->getTerminator();
722 if (PredTI->getNumSuccessors() != 1)
725 if (DT->isReachableFromEntry(Pred))
726 Blocks.push_back(Pred);
730 /// Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
732 static bool handleFree(CallInst *F, AliasAnalysis *AA,
733 MemoryDependenceResults *MD, DominatorTree *DT,
734 const TargetLibraryInfo *TLI,
735 InstOverlapIntervalsTy &IOL,
736 MapVector<Instruction *, bool> &ThrowableInst) {
737 bool MadeChange = false;
739 MemoryLocation Loc = MemoryLocation(F->getOperand(0));
740 SmallVector<BasicBlock *, 16> Blocks;
741 Blocks.push_back(F->getParent());
742 const DataLayout &DL = F->getModule()->getDataLayout();
744 while (!Blocks.empty()) {
745 BasicBlock *BB = Blocks.pop_back_val();
746 Instruction *InstPt = BB->getTerminator();
747 if (BB == F->getParent()) InstPt = F;
750 MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
751 while (Dep.isDef() || Dep.isClobber()) {
752 Instruction *Dependency = Dep.getInst();
753 if (!hasAnalyzableMemoryWrite(Dependency, *TLI) ||
754 !isRemovable(Dependency))
758 GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);
760 // Check for aliasing.
761 if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
765 dbgs() << "DSE: Dead Store to soon to be freed memory:\n DEAD: "
766 << *Dependency << '\n');
768 // DCE instructions only used to calculate that store.
769 BasicBlock::iterator BBI(Dependency);
770 deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL,
775 // Inst's old Dependency is now deleted. Compute the next dependency,
// which may also be dead, as in
//    s[1] = 0; // This has just been deleted.
780 Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
783 if (Dep.isNonLocal())
784 findUnconditionalPreds(Blocks, BB, DT);
/// Check to see if the specified location may alias any of the stack objects in
/// the DeadStackObjects set. If so, they become live because the location is
/// being loaded.
793 static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
794 SmallSetVector<const Value *, 16> &DeadStackObjects,
795 const DataLayout &DL, AliasAnalysis *AA,
796 const TargetLibraryInfo *TLI,
798 const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);
800 // A constant can't be in the dead pointer set.
801 if (isa<Constant>(UnderlyingPointer))
804 // If the kill pointer can be easily reduced to an alloca, don't bother doing
// extraneous AA queries.
806 if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
807 DeadStackObjects.remove(UnderlyingPointer);
811 // Remove objects that could alias LoadedLoc.
812 DeadStackObjects.remove_if([&](const Value *I) {
813 // See if the loaded location could alias the stack location.
814 MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI, F));
815 return !AA->isNoAlias(StackLoc, LoadedLoc);
819 /// Remove dead stores to stack-allocated locations in the function end block.
/// Ex:
///   %A = alloca i32
///   ...
///   store i32 1, i32* %A
///   ret void
825 static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
826 MemoryDependenceResults *MD,
827 const TargetLibraryInfo *TLI,
828 InstOverlapIntervalsTy &IOL,
829 MapVector<Instruction *, bool> &ThrowableInst) {
830 bool MadeChange = false;
// Keep track of all of the stack objects that are dead at the end of the
// function.
834 SmallSetVector<const Value*, 16> DeadStackObjects;
836 // Find all of the alloca'd pointers in the entry block.
837 BasicBlock &Entry = BB.getParent()->front();
838 for (Instruction &I : Entry) {
839 if (isa<AllocaInst>(&I))
840 DeadStackObjects.insert(&I);
842 // Okay, so these are dead heap objects, but if the pointer never escapes
843 // then it's leaked by this function anyways.
844 else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
845 DeadStackObjects.insert(&I);
// Treat byval or inalloca arguments the same; stores to them are dead at the
// end of the function.
850 for (Argument &AI : BB.getParent()->args())
851 if (AI.hasPassPointeeByValueAttr())
852 DeadStackObjects.insert(&AI);
854 const DataLayout &DL = BB.getModule()->getDataLayout();
856 // Scan the basic block backwards
857 for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
860 // If we find a store, check to see if it points into a dead stack value.
861 if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
862 // See through pointer-to-pointer bitcasts
863 SmallVector<const Value *, 4> Pointers;
864 GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);
866 // Stores to stack values are valid candidates for removal.
868 for (const Value *Pointer : Pointers)
869 if (!DeadStackObjects.count(Pointer)) {
875 Instruction *Dead = &*BBI;
877 LLVM_DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n DEAD: "
878 << *Dead << "\n Objects: ";
879 for (SmallVectorImpl<const Value *>::iterator I =
884 if (std::next(I) != E)
889 // DCE instructions only used to calculate that store.
890 deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, ThrowableInst,
898 // Remove any dead non-memory-mutating instructions.
899 if (isInstructionTriviallyDead(&*BBI, TLI)) {
900 LLVM_DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n DEAD: "
902 deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, ThrowableInst,
909 if (isa<AllocaInst>(BBI)) {
910 // Remove allocas from the list of dead stack objects; there can't be
// any references before the definition.
912 DeadStackObjects.remove(&*BBI);
916 if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
917 // Remove allocation function calls from the list of dead stack objects;
// there can't be any references before the definition.
919 if (isAllocLikeFn(&*BBI, TLI))
920 DeadStackObjects.remove(&*BBI);
// If this call does not access memory, it can't be loading any of our
// pointers.
924 if (AA->doesNotAccessMemory(Call))
// If the call might load from any of our allocas, then any store above
// the call is live.
929 DeadStackObjects.remove_if([&](const Value *I) {
930 // See if the call site touches the value.
931 return isRefSet(AA->getModRefInfo(
932 Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
935 // If all of the allocas were clobbered by the call then we're not going
// to find anything else to process.
937 if (DeadStackObjects.empty())
943 // We can remove the dead stores, irrespective of the fence and its ordering
// (release/acquire/seq_cst). Fences only constrain the ordering of already
// visible stores; they do not make a store visible to other threads. So,
// skipping over a fence does not change a store from being dead.
948 if (isa<FenceInst>(*BBI))
951 MemoryLocation LoadedLoc;
953 // If we encounter a use of the pointer, it is no longer considered dead
954 if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
955 if (!L->isUnordered()) // Be conservative with atomic/volatile load
957 LoadedLoc = MemoryLocation::get(L);
958 } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
959 LoadedLoc = MemoryLocation::get(V);
960 } else if (!BBI->mayReadFromMemory()) {
961 // Instruction doesn't read memory. Note that stores that weren't removed
// above will hit this case.
965 // Unknown inst; assume it clobbers everything.
969 // Remove any allocas from the DeadPointer set that are loaded, as this
// makes any stores above the access live.
971 removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI, BB.getParent());
973 // If all of the allocas were clobbered by the access then we're not going
// to find anything else to process.
975 if (DeadStackObjects.empty())
982 static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
983 int64_t &EarlierSize, int64_t LaterOffset,
984 int64_t LaterSize, bool IsOverwriteEnd) {
// TODO: base this on the target vector size so that if the earlier
// store was too small to get vector writes anyway then it's likely
// a good idea to shorten it.
// Power-of-2 vector writes are probably always a bad idea to optimize,
// as any store/memset/memcpy is likely using vector instructions, so
// shortening it to a non-vector size is likely to be slower.
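// For example, an earlier memset(p, 0, 32) whose last 16 bytes are later
// overwritten can be shortened to memset(p, 0, 16), subject to the
// alignment checks below.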
991 auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
992 unsigned EarlierWriteAlign = EarlierIntrinsic->getDestAlignment();
994 LaterOffset = int64_t(LaterOffset + LaterSize);
996 if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
997 !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
1000 int64_t NewLength = IsOverwriteEnd
1001 ? LaterOffset - EarlierOffset
1002 : EarlierSize - (LaterOffset - EarlierOffset);
1004 if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
1005 // When shortening an atomic memory intrinsic, the newly shortened
// length must remain an integer multiple of the element size.
1007 const uint32_t ElementSize = AMI->getElementSizeInBytes();
1008 if (0 != NewLength % ElementSize)
1012 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "
1013 << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
1014 << *EarlierWrite << "\n KILLER (offset " << LaterOffset
1015 << ", " << EarlierSize << ")\n");
1017 Value *EarlierWriteLength = EarlierIntrinsic->getLength();
1018 Value *TrimmedLength =
1019 ConstantInt::get(EarlierWriteLength->getType(), NewLength);
1020 EarlierIntrinsic->setLength(TrimmedLength);
1022 EarlierSize = NewLength;
1023 if (!IsOverwriteEnd) {
1024 int64_t OffsetMoved = (LaterOffset - EarlierOffset);
1025 Value *Indices[1] = {
1026 ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
1027 GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
1028 EarlierIntrinsic->getRawDest()->getType()->getPointerElementType(),
1029 EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
1030 NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc());
1031 EarlierIntrinsic->setDest(NewDestGEP);
1032 EarlierOffset = EarlierOffset + OffsetMoved;
1037 static bool tryToShortenEnd(Instruction *EarlierWrite,
1038 OverlapIntervalsTy &IntervalMap,
1039 int64_t &EarlierStart, int64_t &EarlierSize) {
1040 if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
1043 OverlapIntervalsTy::iterator OII = --IntervalMap.end();
1044 int64_t LaterStart = OII->second;
1045 int64_t LaterSize = OII->first - LaterStart;
1047 if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
1048 LaterStart + LaterSize >= EarlierStart + EarlierSize) {
1049 if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
1051 IntervalMap.erase(OII);
1058 static bool tryToShortenBegin(Instruction *EarlierWrite,
1059 OverlapIntervalsTy &IntervalMap,
1060 int64_t &EarlierStart, int64_t &EarlierSize) {
1061 if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
1064 OverlapIntervalsTy::iterator OII = IntervalMap.begin();
1065 int64_t LaterStart = OII->second;
1066 int64_t LaterSize = OII->first - LaterStart;
1068 if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
1069 assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
1070 "Should have been handled as OW_Complete");
1071 if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
1072 LaterSize, false)) {
1073 IntervalMap.erase(OII);
1080 static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
1081 const DataLayout &DL,
1082 InstOverlapIntervalsTy &IOL) {
1083 bool Changed = false;
1084 for (auto OI : IOL) {
1085 Instruction *EarlierWrite = OI.first;
1086 MemoryLocation Loc = getLocForWrite(EarlierWrite);
1087 assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
1089 const Value *Ptr = Loc.Ptr->stripPointerCasts();
1090 int64_t EarlierStart = 0;
1091 int64_t EarlierSize = int64_t(Loc.Size.getValue());
1092 GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
1093 OverlapIntervalsTy &IntervalMap = OI.second;
1095 tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
1096 if (IntervalMap.empty())
1099 tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
1104 static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
1105 AliasAnalysis *AA, MemoryDependenceResults *MD,
1106 const DataLayout &DL,
1107 const TargetLibraryInfo *TLI,
1108 InstOverlapIntervalsTy &IOL,
1109 MapVector<Instruction *, bool> &ThrowableInst,
1110 DominatorTree *DT) {
1111 // Must be a store instruction.
1112 StoreInst *SI = dyn_cast<StoreInst>(Inst);
1116 // If we're storing the same value back to a pointer that we just loaded from,
// then the store can be removed.
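// For example (illustrative):
//   %v = load i32, i32* %p
//   store i32 %v, i32* %p   ; redundant if %p is not modified in between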
1118 if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
1119 if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
1121 memoryIsNotModifiedBetween(DepLoad, SI, AA, DL, DT)) {
1124 dbgs() << "DSE: Remove Store Of Load from same pointer:\n LOAD: "
1125 << *DepLoad << "\n STORE: " << *SI << '\n');
1127 deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, ThrowableInst);
1128 ++NumRedundantStores;
1133 // Remove null stores into the calloc'ed objects
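// calloc returns zero-initialized memory, so a store of a null value into it
// that is not preceded by any other modification of the object is redundant.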
1134 Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
1135 if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
1136 Instruction *UnderlyingPointer =
1137 dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));
1139 if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
1140 memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA, DL, DT)) {
1142 dbgs() << "DSE: Remove null store to the calloc'ed object:\n DEAD: "
1143 << *Inst << "\n OBJECT: " << *UnderlyingPointer << '\n');
1145 deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, ThrowableInst);
1146 ++NumRedundantStores;
1154 tryToMergePartialOverlappingStores(StoreInst *Earlier, StoreInst *Later,
1155 int64_t InstWriteOffset,
1156 int64_t DepWriteOffset, const DataLayout &DL,
1157 AliasAnalysis *AA, DominatorTree *DT) {
1159 if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
1160 DL.typeSizeEqualsStoreSize(Earlier->getValueOperand()->getType()) &&
1161 Later && isa<ConstantInt>(Later->getValueOperand()) &&
1162 DL.typeSizeEqualsStoreSize(Later->getValueOperand()->getType()) &&
1163 memoryIsNotModifiedBetween(Earlier, Later, AA, DL, DT)) {
1164 // If the store we find is:
1165 // a) partially overwritten by the store to 'Loc'
1166 // b) the later store is fully contained in the earlier one and
1167 // c) they both have a constant value
1168 // d) none of the two stores need padding
1169 // Merge the two stores, replacing the earlier store's value with a
1170 // merge of both values.
1171 // TODO: Deal with other constant types (vectors, etc), and probably
// some mem intrinsics (if needed)
1174 APInt EarlierValue =
1175 cast<ConstantInt>(Earlier->getValueOperand())->getValue();
1176 APInt LaterValue = cast<ConstantInt>(Later->getValueOperand())->getValue();
1177 unsigned LaterBits = LaterValue.getBitWidth();
1178 assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
1179 LaterValue = LaterValue.zext(EarlierValue.getBitWidth());
1181 // Offset of the smaller store inside the larger store
1182 unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
1183 unsigned LShiftAmount = DL.isBigEndian() ? EarlierValue.getBitWidth() -
1184 BitOffsetDiff - LaterBits
1186 APInt Mask = APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
1187 LShiftAmount + LaterBits);
1188 // Clear the bits we'll be replacing, then OR with the smaller
1189 // store, shifted appropriately.
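// For example (little endian, illustrative): an earlier i32 store of
// 0x11223344 partially overwritten at byte offset 2 by a later i16 store of
// 0xAABB gives LShiftAmount = 16, Mask = 0xFFFF0000 and a merged value of
// 0xAABB3344.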
1190 APInt Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
1191 LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Earlier: " << *Earlier
1192 << "\n Later: " << *Later
1193 << "\n Merged Value: " << Merged << '\n');
1194 return ConstantInt::get(Earlier->getValueOperand()->getType(), Merged);
1199 static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
1200 MemoryDependenceResults *MD, DominatorTree *DT,
1201 const TargetLibraryInfo *TLI) {
1202 const DataLayout &DL = BB.getModule()->getDataLayout();
1203 bool MadeChange = false;
1205 MapVector<Instruction *, bool> ThrowableInst;
1207 // A map of interval maps representing partially-overwritten value parts.
1208 InstOverlapIntervalsTy IOL;
1210 // Do a top-down walk on the BB.
1211 for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
1212 // Handle 'free' calls specially.
1213 if (CallInst *F = isFreeCall(&*BBI, TLI)) {
1214 MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, ThrowableInst);
1215 // Increment BBI after handleFree has potentially deleted instructions.
1216 // This ensures we maintain a valid iterator.
1221 Instruction *Inst = &*BBI++;
1223 if (Inst->mayThrow()) {
1224 ThrowableInst[Inst] = true;
1228 // Check to see if Inst writes to memory. If not, continue.
1229 if (!hasAnalyzableMemoryWrite(Inst, *TLI))
1232 // eliminateNoopStore will update in iterator, if necessary.
1233 if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL,
1234 ThrowableInst, DT)) {
1239 // If we find something that writes memory, get its memory dependence.
1240 MemDepResult InstDep = MD->getDependency(Inst);
1242 // Ignore any store where we can't find a local dependence.
1243 // FIXME: cross-block DSE would be fun. :)
1244 if (!InstDep.isDef() && !InstDep.isClobber())
1247 // Figure out what location is being stored to.
1248 MemoryLocation Loc = getLocForWrite(Inst);
1250 // If we didn't get a useful location, fail.
1254 // Loop until we find a store we can eliminate or a load that
// invalidates the analysis. Without an upper bound on the number of
// instructions examined, this analysis can become very time-consuming.
// However, the potential gain diminishes as we process more instructions
// without eliminating any of them. Therefore, we limit the number of
// instructions we look at.
1260 auto Limit = MD->getDefaultBlockScanLimit();
1261 while (InstDep.isDef() || InstDep.isClobber()) {
1262 // Get the memory clobbered by the instruction we depend on. MemDep will
// skip any instructions that 'Loc' clearly doesn't interact with. If we
// end up depending on a may- or must-aliased load, then we can't optimize
// away the store and we bail out. However, if we depend on something
// that overwrites the memory location we *can* potentially optimize it.
1268 // Find out what memory location the dependent instruction stores.
1269 Instruction *DepWrite = InstDep.getInst();
1270 if (!hasAnalyzableMemoryWrite(DepWrite, *TLI))
1272 MemoryLocation DepLoc = getLocForWrite(DepWrite);
1273 // If we didn't get a useful location, or if it isn't a size, bail out.
1277 // Find the last throwable instruction not removed by call to
// deleteDeadInstruction.
1279 Instruction *LastThrowing = nullptr;
1280 if (!ThrowableInst.empty())
1281 LastThrowing = ThrowableInst.back().first;
1283 // Make sure we don't look past a call which might throw. This is an
// issue because MemoryDependenceAnalysis works in the wrong direction:
// it finds instructions which dominate the current instruction, rather than
// instructions which are post-dominated by the current instruction.
1288 // If the underlying object is a non-escaping memory allocation, any store
// to it is dead along the unwind edge. Otherwise, we need to preserve
// the store.
1291 if (LastThrowing && DepWrite->comesBefore(LastThrowing)) {
1292 const Value* Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
1293 bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
1294 if (!IsStoreDeadOnUnwind) {
1295 // We're looking for a call to an allocation function
// where the allocation doesn't escape before the last
// throwing instruction; PointerMayBeCaptured is a
// reasonably fast approximation.
1299 IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
1300 !PointerMayBeCaptured(Underlying, false, true);
1302 if (!IsStoreDeadOnUnwind)
1306 // If we find a write that is a) removable (i.e., non-volatile), b) is
// completely obliterated by the store to 'Loc', and c) which we know that
// 'Inst' doesn't load from, then we can remove it.
1309 // Also try to merge two stores if a later one only touches memory written
// to by the earlier one.
1311 if (isRemovable(DepWrite) &&
1312 !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
1313 int64_t InstWriteOffset, DepWriteOffset;
1314 OverwriteResult OR = isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset,
1315 InstWriteOffset, DepWrite, IOL, *AA,
1317 if (OR == OW_Complete) {
1318 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DepWrite
1319 << "\n KILLER: " << *Inst << '\n');
1321 // Delete the store and now-dead instructions that feed it.
1322 deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
1327 // We erased DepWrite; start over.
1328 InstDep = MD->getDependency(Inst);
1330 } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
1332 isShortenableAtTheBeginning(DepWrite)))) {
1333 assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
1334 "when partial-overwrite "
1335 "tracking is enabled");
1336 // The overwrite result is known, so these must be known, too.
1337 int64_t EarlierSize = DepLoc.Size.getValue();
1338 int64_t LaterSize = Loc.Size.getValue();
1339 bool IsOverwriteEnd = (OR == OW_End);
1340 MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
1341 InstWriteOffset, LaterSize, IsOverwriteEnd);
1342 } else if (EnablePartialStoreMerging &&
1343 OR == OW_PartialEarlierWithFullLater) {
1344 auto *Earlier = dyn_cast<StoreInst>(DepWrite);
1345 auto *Later = dyn_cast<StoreInst>(Inst);
1346 if (Constant *C = tryToMergePartialOverlappingStores(
1347 Earlier, Later, InstWriteOffset, DepWriteOffset, DL, AA,
1349 auto *SI = new StoreInst(
1350 C, Earlier->getPointerOperand(), false, Earlier->getAlign(),
1351 Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);
1353 unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
1354 LLVMContext::MD_alias_scope,
1355 LLVMContext::MD_noalias,
1356 LLVMContext::MD_nontemporal};
1357 SI->copyMetadata(*DepWrite, MDToKeep);
1358 ++NumModifiedStores;
1360 // Delete the old stores and now-dead instructions that feed them.
1361 deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL,
1363 deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
1367 // We erased DepWrite and Inst (Loc); start over.
1373 // If this is a may-aliased store that is clobbering the store value, we
// can keep searching past it for another must-aliased pointer that stores
// to the same location. For example, in:
//   store -> P
//   store -> Q
//   store -> P
// we can remove the first store to P even though we don't know if P and Q
// alias.
1381 if (DepWrite == &BB.front()) break;
1383 // Can't look past this instruction if it might read 'Loc'.
1384 if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
1387 InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
1388 DepWrite->getIterator(), &BB,
1389 /*QueryInst=*/ nullptr, &Limit);
1393 if (EnablePartialOverwriteTracking)
1394 MadeChange |= removePartiallyOverlappedStores(AA, DL, IOL);
1396 // If this block ends in a return, unwind, or unreachable, all allocas are
// dead at its end, which means stores to them are also dead.
1398 if (BB.getTerminator()->getNumSuccessors() == 0)
1399 MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, ThrowableInst);
1404 static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
1405 MemoryDependenceResults *MD, DominatorTree *DT,
1406 const TargetLibraryInfo *TLI) {
1407 bool MadeChange = false;
1408 for (BasicBlock &BB : F)
1409 // Only check non-dead blocks. Dead blocks may have strange pointer
// cycles that will confuse alias analysis.
1411 if (DT->isReachableFromEntry(&BB))
1412 MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);
1418 //=============================================================================
1419 // MemorySSA backed dead store elimination.
1421 // The code below implements dead store elimination using MemorySSA. It uses
// the following general approach: given a MemoryDef, walk upwards to find
// clobbering MemoryDefs that may be killed by the starting def. Then check
// that there are no uses that may read the location of the original MemoryDef
// in between both MemoryDefs. A bit more concretely:
1427 // For all MemoryDefs StartDef:
// 1. Get the next dominating clobbering MemoryDef (DomAccess) by walking
//    upwards.
1430 // 2. Check that there are no reads between DomAccess and the StartDef by
//    checking all uses starting at DomAccess and walking until we see StartDef.
1432 // 3. For each found DomDef, check that:
1433 // 1. There are no barrier instructions between DomDef and StartDef (like
//    throws or stores with ordering constraints).
1435 // 2. StartDef is executed whenever DomDef is executed.
1436 // 3. StartDef completely overwrites DomDef.
1437 // 4. Erase DomDef from the function and MemorySSA.
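//
// Illustrative example, assuming no reads of %p in between:
//   store i32 0, i32* %p    ; dead, found as a clobbering DomAccess
//   store i32 1, i32* %p    ; StartDef, completely overwrites the store above
// The first store can then be erased from the function and from MemorySSA.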
// Returns true if \p M is an intrinsic that does not read or write memory.
1440 bool isNoopIntrinsic(MemoryUseOrDef *M) {
1441 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(M->getMemoryInst())) {
1442 switch (II->getIntrinsicID()) {
1443 case Intrinsic::lifetime_start:
1444 case Intrinsic::lifetime_end:
1445 case Intrinsic::invariant_end:
1446 case Intrinsic::launder_invariant_group:
1447 case Intrinsic::assume:
1449 case Intrinsic::dbg_addr:
1450 case Intrinsic::dbg_declare:
1451 case Intrinsic::dbg_label:
1452 case Intrinsic::dbg_value:
1453 llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
1461 // Check if we can ignore \p D for DSE.
1462 bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
1463 Instruction *DI = D->getMemoryInst();
1464 // Calls that only access inaccessible memory cannot read or write any memory
// locations we consider for elimination.
1466 if (auto *CB = dyn_cast<CallBase>(DI))
1467 if (CB->onlyAccessesInaccessibleMemory())
1470 // We can eliminate stores to locations not visible to the caller across
// throwing instructions.
1472 if (DI->mayThrow() && !DefVisibleToCaller)
1475 // We can remove the dead stores, irrespective of the fence and its ordering
// (release/acquire/seq_cst). Fences only constrain the ordering of already
// visible stores; they do not make a store visible to other threads. So,
// skipping over a fence does not change a store from being dead.
1480 if (isa<FenceInst>(DI))
1483 // Skip intrinsics that do not really read or modify memory.
1484 if (isNoopIntrinsic(D))
1495 PostDominatorTree &PDT;
1496 const TargetLibraryInfo &TLI;
1498 // All MemoryDefs that potentially could kill other MemDefs.
1499 SmallVector<MemoryDef *, 64> MemDefs;
1500 // Any that should be skipped as they are already deleted
1501 SmallPtrSet<MemoryAccess *, 4> SkipStores;
1502 // Keep track of all of the objects that are invisible to the caller before
// the function returns.
1504 SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet;
1505 // Keep track of all of the objects that are invisible to the caller after
// the function returns.
1507 SmallPtrSet<const Value *, 16> InvisibleToCallerAfterRet;
1508 // Keep track of blocks with throwing instructions not modeled in MemorySSA.
1509 SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
1510 // Post-order numbers for each basic block. Used to figure out if memory
// accesses are executed before another access.
1512 DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
/// Keep track of instructions (partly) overlapping with killing MemoryDefs per
/// basic block.
1516 DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs;
1518 DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
1519 PostDominatorTree &PDT, const TargetLibraryInfo &TLI)
1520 : F(F), AA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI) {}
1522 static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
1523 DominatorTree &DT, PostDominatorTree &PDT,
1524 const TargetLibraryInfo &TLI) {
1525 DSEState State(F, AA, MSSA, DT, PDT, TLI);
1526 // Collect blocks with throwing instructions not modeled in MemorySSA and
// alloc-like objects.
1529 for (BasicBlock *BB : post_order(&F)) {
1530 State.PostOrderNumbers[BB] = PO++;
1531 for (Instruction &I : *BB) {
1532 MemoryAccess *MA = MSSA.getMemoryAccess(&I);
1533 if (I.mayThrow() && !MA)
1534 State.ThrowingBlocks.insert(I.getParent());
1536 auto *MD = dyn_cast_or_null<MemoryDef>(MA);
1537 if (MD && State.MemDefs.size() < MemorySSADefsPerBlockLimit &&
1538 (State.getLocForWriteEx(&I) || State.isMemTerminatorInst(&I)))
1539 State.MemDefs.push_back(MD);
1541 // Track whether alloca and alloca-like objects are visible in the
// caller before and after the function returns. Alloca objects are
// invalid in the caller, so they are neither visible before nor after
// the function returns.
1545 if (isa<AllocaInst>(&I)) {
1546 State.InvisibleToCallerBeforeRet.insert(&I);
1547 State.InvisibleToCallerAfterRet.insert(&I);
1550 // For alloca-like objects we need to check if they are captured before
// the function returns and if the return might capture the object.
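// For example, memory from malloc that is only ever returned to the caller
// is invisible before the return but becomes visible to the caller after it.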
1552 if (isAllocLikeFn(&I, &TLI)) {
1553 bool CapturesBeforeRet = PointerMayBeCaptured(&I, false, true);
1554 if (!CapturesBeforeRet) {
1555 State.InvisibleToCallerBeforeRet.insert(&I);
1556 if (!PointerMayBeCaptured(&I, true, false))
1557 State.InvisibleToCallerAfterRet.insert(&I);
// Treat byval or inalloca arguments the same as allocas; stores to them are
// dead at the end of the function.
1565 for (Argument &AI : F.args())
1566 if (AI.hasPassPointeeByValueAttr()) {
1567 // For byval, the caller doesn't know the address of the allocation.
1568 if (AI.hasByValAttr())
1569 State.InvisibleToCallerBeforeRet.insert(&AI);
1570 State.InvisibleToCallerAfterRet.insert(&AI);
1576 Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const {
1577 if (!I->mayWriteToMemory())
1580 if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
1581 return {MemoryLocation::getForDest(MTI)};
1583 if (auto *CB = dyn_cast<CallBase>(I)) {
1585 if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
1587 case LibFunc_strcpy:
1588 case LibFunc_strncpy:
1589 case LibFunc_strcat:
1590 case LibFunc_strncat:
1591 return {MemoryLocation(CB->getArgOperand(0))};
1599 return MemoryLocation::getOrNone(I);
1602 /// Returns true if \p Use completely overwrites \p DefLoc.
1603 bool isCompleteOverwrite(MemoryLocation DefLoc, Instruction *UseInst) const {
1604 // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
// MemoryDef to not write to memory, e.g. a volatile load is modeled as a
// MemoryDef.
1607 if (!UseInst->mayWriteToMemory())
1610 if (auto *CB = dyn_cast<CallBase>(UseInst))
1611 if (CB->onlyAccessesInaccessibleMemory())
1614 int64_t InstWriteOffset, DepWriteOffset;
1615 auto CC = getLocForWriteEx(UseInst);
1616 InstOverlapIntervalsTy IOL;
1618 const DataLayout &DL = F.getParent()->getDataLayout();
1621 isOverwrite(*CC, DefLoc, DL, TLI, DepWriteOffset, InstWriteOffset,
1622 UseInst, IOL, AA, &F) == OW_Complete;
1625 /// Returns true if \p Def is not read before returning from the function.
1626 bool isWriteAtEndOfFunction(MemoryDef *Def) {
1627 LLVM_DEBUG(dbgs() << " Check if def " << *Def << " ("
1628 << *Def->getMemoryInst()
<< ") is at the end of the function\n");
1631 auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst());
1633 LLVM_DEBUG(dbgs() << " ... could not get location for write.\n");
1637 SmallVector<MemoryAccess *, 4> WorkList;
1638 SmallPtrSet<MemoryAccess *, 8> Visited;
1639 auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
1640 if (!Visited.insert(Acc).second)
1642 for (Use &U : Acc->uses())
1643 WorkList.push_back(cast<MemoryAccess>(U.getUser()));
1646 for (unsigned I = 0; I < WorkList.size(); I++) {
1647 if (WorkList.size() >= MemorySSAScanLimit) {
1648 LLVM_DEBUG(dbgs() << " ... hit exploration limit.\n");
1652 MemoryAccess *UseAccess = WorkList[I];
1653 if (isa<MemoryPhi>(UseAccess)) {
1654 PushMemUses(UseAccess);
1658 // TODO: Checking for aliasing is expensive. Consider reducing the number
1659 // of times this is called and/or caching it.
1660 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1661 if (isReadClobber(*MaybeLoc, UseInst)) {
1662 LLVM_DEBUG(dbgs() << " ... hit read clobber " << *UseInst << ".\n");
1666 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1667 PushMemUses(UseDef);
1672 /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1673 /// pair with the MemoryLocation terminated by \p I and a boolean flag
1674 /// indicating whether \p I is a free-like call.
1675 Optional<std::pair<MemoryLocation, bool>>
1676 getLocForTerminator(Instruction *I) const {
1679 if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
1681 return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1683 if (auto *CB = dyn_cast<CallBase>(I)) {
1684 if (isFreeCall(I, &TLI))
1685 return {std::make_pair(MemoryLocation(CB->getArgOperand(0)), true)};
1691 /// Returns true if \p I is a memory terminator instruction like
1692 /// llvm.lifetime.end or free.
1693 bool isMemTerminatorInst(Instruction *I) const {
1694 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1695 return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) ||
1696 isFreeCall(I, &TLI);
1699 /// Returns true if \p MaybeTerm is a memory terminator for the same
1700 /// underlying object as \p DefLoc.
1701 bool isMemTerminator(MemoryLocation DefLoc, Instruction *MaybeTerm) const {
1702 Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1703 getLocForTerminator(MaybeTerm);
1708 // If the terminator is a free-like call, all accesses to the underlying
1709 // object can be considered terminated.
1710 if (MaybeTermLoc->second) {
1711 const DataLayout &DL = MaybeTerm->getParent()->getModule()->getDataLayout();
1712 DefLoc = MemoryLocation(GetUnderlyingObject(DefLoc.Ptr, DL));
1714 return AA.isMustAlias(MaybeTermLoc->first, DefLoc);
1717 // Returns true if \p UseInst may read from \p DefLoc.
1718 bool isReadClobber(MemoryLocation DefLoc, Instruction *UseInst) const {
1719 if (!UseInst->mayReadFromMemory())
1722 if (auto *CB = dyn_cast<CallBase>(UseInst))
1723 if (CB->onlyAccessesInaccessibleMemory())
1726 ModRefInfo MR = AA.getModRefInfo(UseInst, DefLoc);
1727 // If necessary, perform additional analysis.
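// callCapturesBefore can refine a conservative Mod/Ref result for calls by
// using capture information: if the underlying object of DefLoc has not been
// captured before the call, the call cannot read it through another pointer.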
1729 MR = AA.callCapturesBefore(UseInst, DefLoc, &DT);
1730 return isRefSet(MR);
1733 // Find a MemoryDef writing to \p DefLoc and dominating \p Current, with no
1734 // read access between them or on any other path to a function exit block if
1735 // \p DefLoc is not accessible after the function returns. If there is no such
1736 // MemoryDef, return None. The returned value may not (completely) overwrite
1737 // \p DefLoc. Currently we bail out when we encounter an aliasing MemoryUse
1738 // (read).
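// The search below proceeds in two steps: first follow the defining accesses
// upwards from Current (skipping defs that canSkipDef allows) until a
// candidate DomAccess is found; then scan all uses reachable from DomAccess
// to make sure nothing may read DefLoc and, for locations visible after the
// function returns, that every path to an exit is covered by a killing store.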
1739 Optional<MemoryAccess *>
1740 getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *Current,
1741 MemoryLocation DefLoc, bool DefVisibleToCallerBeforeRet,
1742 bool DefVisibleToCallerAfterRet, int &ScanLimit) const {
1743 MemoryAccess *DomAccess;
1745 LLVM_DEBUG(dbgs() << " trying to get dominating access for " << *Current
1747 // Find the next clobbering Mod access for DefLoc, starting at Current.
1751 if (MSSA.isLiveOnEntryDef(Current))
1754 if (isa<MemoryPhi>(Current)) {
1755 DomAccess = Current;
1758 MemoryUseOrDef *CurrentUD = cast<MemoryUseOrDef>(Current);
1759 // Look for accesses that clobber DefLoc.
1760 DomAccess = MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(CurrentUD,
1762 if (MSSA.isLiveOnEntryDef(DomAccess))
1765 if (isa<MemoryPhi>(DomAccess))
1768 // Check if we can skip DomDef for DSE.
1769 MemoryDef *DomDef = dyn_cast<MemoryDef>(DomAccess);
1770 if (DomDef && canSkipDef(DomDef, DefVisibleToCallerBeforeRet)) {
1772 Current = DomDef->getDefiningAccess();
1775 } while (StepAgain);
1777 // Accesses to objects accessible after the function returns can only be
1778 // eliminated if the access is killed along all paths to the exit. Collect
1779 // the blocks with killing (=completely overwriting) MemoryDefs and check if
1780 // they cover all paths from DomAccess to any function exit.
1781 SmallPtrSet<BasicBlock *, 16> KillingBlocks = {KillingDef->getBlock()};
1783 dbgs() << " Checking for reads of " << *DomAccess;
1784 if (isa<MemoryDef>(DomAccess))
1785 dbgs() << " (" << *cast<MemoryDef>(DomAccess)->getMemoryInst() << ")\n";
1790 SmallSetVector<MemoryAccess *, 32> WorkList;
1791 auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
1792 for (Use &U : Acc->uses())
1793 WorkList.insert(cast<MemoryAccess>(U.getUser()));
1795 PushMemUses(DomAccess);
1797 // Check if DomAccess may be read.
1798 for (unsigned I = 0; I < WorkList.size(); I++) {
1799 MemoryAccess *UseAccess = WorkList[I];
1801 LLVM_DEBUG(dbgs() << " " << *UseAccess);
1802 if (--ScanLimit == 0) {
1803 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1807 if (isa<MemoryPhi>(UseAccess)) {
1808 LLVM_DEBUG(dbgs() << "\n ... adding PHI uses\n");
1809 PushMemUses(UseAccess);
1813 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1814 LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
1816 if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess))) {
1817 LLVM_DEBUG(dbgs() << " ... adding uses of intrinsic\n");
1818 PushMemUses(UseAccess);
1822 // A memory terminator kills all preceding MemoryDefs and all succeeding
1823 // MemoryAccesses. We do not have to check its users.
1824 if (isMemTerminator(DefLoc, UseInst))
1827 // Uses which may read the original MemoryDef mean we cannot eliminate the
1828 // original MD. Stop walk.
1829 if (isReadClobber(DefLoc, UseInst)) {
1830 LLVM_DEBUG(dbgs() << " ... found read clobber\n");
1834 // For the KillingDef and DomAccess we only have to check if they read the
1835 // location.
1836 // TODO: It would probably be better to check for self-reads before
1837 // calling the function.
1838 if (KillingDef == UseAccess || DomAccess == UseAccess) {
1839 LLVM_DEBUG(dbgs() << " ... skipping killing def/dom access\n");
1843 // Check the uses of all MemoryDefs we discover, including non-aliasing
1844 // ones; only defs that completely overwrite the original location can be
1845 // skipped. If we only followed aliasing defs, we might miss cases like
1846 // the following
1847 // 1 = Def(LoE) ; <----- DomDef stores [0,1]
1848 // 2 = Def(1) ; (2, 1) = NoAlias, stores [2,3]
1849 // Use(2) ; MayAlias 2 *and* 1, loads [0, 3].
1850 // (The Use points to the *first* Def it may alias)
1851 //   3 = Def(1) ; <---- Current (3, 2) = NoAlias, (3,1) = MayAlias,
1852 //                      stores [0,1]
1853 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
1854 if (isCompleteOverwrite(DefLoc, UseInst)) {
1855 if (DefVisibleToCallerAfterRet && UseAccess != DomAccess) {
1856 BasicBlock *MaybeKillingBlock = UseInst->getParent();
1857 if (PostOrderNumbers.find(MaybeKillingBlock)->second <
1858 PostOrderNumbers.find(DomAccess->getBlock())->second) {
1860 LLVM_DEBUG(dbgs() << " ... found killing block "
1861 << MaybeKillingBlock->getName() << "\n");
1862 KillingBlocks.insert(MaybeKillingBlock);
1866 PushMemUses(UseDef);
1870 // For accesses to locations visible after the function returns, make sure
1871 // that the location is killed (=overwritten) along all paths from DomAccess
1872 // to the exit.
1873 if (DefVisibleToCallerAfterRet) {
1874 assert(!KillingBlocks.empty() &&
1875 "Expected at least a single killing block");
1876 // Find the common post-dominator of all killing blocks.
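// For example, with killing stores in both arms of an if/else, the nearest
// common post-dominator is the join block after the branch. Unless that
// block is itself a killing block, post-dominance alone is not enough, so
// the code below also walks back from the exits to check that every path
// crosses a killing block before reaching DomAccess.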
1877 BasicBlock *CommonPred = *KillingBlocks.begin();
1878 for (auto I = std::next(KillingBlocks.begin()), E = KillingBlocks.end();
1882 CommonPred = PDT.findNearestCommonDominator(CommonPred, *I);
1885 // If CommonPred is in the set of killing blocks, just check if it
1886 // post-dominates DomAccess.
1887 if (KillingBlocks.count(CommonPred)) {
1888 if (PDT.dominates(CommonPred, DomAccess->getBlock()))
1893 // If the common post-dominator does not post-dominate DomAccess, there is
1894 // a path from DomAccess to an exit not going through any killing block, so
// DomAccess cannot be eliminated.
1895 if (PDT.dominates(CommonPred, DomAccess->getBlock())) {
1896 SetVector<BasicBlock *> WorkList;
1898 // DomAccess's post-order number provides an upper bound for the post-order
1899 // numbers of blocks on a path starting at DomAccess.
1900 unsigned UpperBound =
1901 PostOrderNumbers.find(DomAccess->getBlock())->second;
1903 // If CommonPred is null, there are multiple exits from the function.
1904 // They all have to be added to the worklist.
1906 WorkList.insert(CommonPred);
1908 for (BasicBlock *R : PDT.roots())
1912 // Check if all paths starting from an exit node go through one of the
1913 // killing blocks before reaching DomAccess.
1914 for (unsigned I = 0; I < WorkList.size(); I++) {
1916 BasicBlock *Current = WorkList[I];
1917 if (KillingBlocks.count(Current))
1919 if (Current == DomAccess->getBlock())
1922 // DomAccess is reachable from the entry, so we don't have to explore
1923 // unreachable blocks further.
1924 if (!DT.isReachableFromEntry(Current))
1927 unsigned CPO = PostOrderNumbers.find(Current)->second;
1928 // Current block is not on a path starting at DomAccess.
1929 if (CPO > UpperBound)
1931 for (BasicBlock *Pred : predecessors(Current))
1932 WorkList.insert(Pred);
1934 if (WorkList.size() >= MemorySSAPathCheckLimit)
1943 // No aliasing MemoryUses of DomAccess found, so DomAccess is potentially dead.
1947 // Delete dead memory defs
1948 void deleteDeadInstruction(Instruction *SI) {
1949 MemorySSAUpdater Updater(&MSSA);
1950 SmallVector<Instruction *, 32> NowDeadInsts;
1951 NowDeadInsts.push_back(SI);
1954 while (!NowDeadInsts.empty()) {
1955 Instruction *DeadInst = NowDeadInsts.pop_back_val();
1958 // Try to preserve debug information attached to the dead instruction.
1959 salvageDebugInfo(*DeadInst);
1960 salvageKnowledge(DeadInst);
1962 // Remove the Instruction from MSSA.
1963 if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
1964 if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
1965 SkipStores.insert(MD);
1967 Updater.removeMemoryAccess(MA);
1970 auto I = IOLs.find(DeadInst->getParent());
1971 if (I != IOLs.end())
1972 I->second.erase(DeadInst);
1973 // Remove its operands
1974 for (Use &O : DeadInst->operands())
1975 if (Instruction *OpI = dyn_cast<Instruction>(O)) {
1977 if (isInstructionTriviallyDead(OpI, &TLI))
1978 NowDeadInsts.push_back(OpI);
1981 DeadInst->eraseFromParent();
1985 // Check for any extra throws between SI and NI that block DSE. This only
1986 // checks extra may-throw instructions (those that aren't MemoryDefs);
1987 // MemoryDefs that may throw are handled during the walk from one def to the next.
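// For example, if a call that may throw executes between NI and SI, the
// caller (or an exception handler) could observe the memory before SI
// overwrites it, so NI can only be removed when the underlying object is
// not visible to the caller, e.g. a non-escaping alloca.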
1988 bool mayThrowBetween(Instruction *SI, Instruction *NI,
1989 const Value *SILocUnd) const {
1990 // First see if we can ignore it by using the fact that SI accesses an
1991 // alloca/alloca-like object that is not visible to the caller during
1992 // execution of the function.
1993 if (SILocUnd && InvisibleToCallerBeforeRet.count(SILocUnd))
1996 if (SI->getParent() == NI->getParent())
1997 return ThrowingBlocks.count(SI->getParent());
1998 return !ThrowingBlocks.empty();
2001 // Check if \p NI acts as a DSE barrier for \p SI. The following instructions
2002 // act as barriers:
2003 //  * A memory instruction that may throw and \p SI accesses a non-stack
2004 //    object.
2005 //  * Atomic stores stronger than monotonic.
2006 bool isDSEBarrier(const Value *SILocUnd, Instruction *NI) const {
2007 // If NI may throw it acts as a barrier, unless we are accessing an
2008 // alloca/alloca-like object that does not escape.
2009 if (NI->mayThrow() && !InvisibleToCallerBeforeRet.count(SILocUnd))
2012 // If NI is an atomic load/store stronger than monotonic, do not try to
2013 // eliminate/reorder it.
2014 if (NI->isAtomic()) {
2015 if (auto *LI = dyn_cast<LoadInst>(NI))
2016 return isStrongerThanMonotonic(LI->getOrdering());
2017 if (auto *SI = dyn_cast<StoreInst>(NI))
2018 return isStrongerThanMonotonic(SI->getOrdering());
2019 llvm_unreachable("other instructions should be skipped in MemorySSA");
2024 /// Eliminate writes to objects that are not visible in the caller and are not
2025 /// accessed before returning from the function.
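/// For example (illustrative IR), the store below is dead because %a does not
/// escape the function and is never read before returning:
///   %a = alloca i32
///   store i32 42, i32* %a
///   ret void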
2026 bool eliminateDeadWritesAtEndOfFunction() {
2027 const DataLayout &DL = F.getParent()->getDataLayout();
2028 bool MadeChange = false;
2031 << "Trying to eliminate MemoryDefs at the end of the function\n");
2032 for (int I = MemDefs.size() - 1; I >= 0; I--) {
2033 MemoryDef *Def = MemDefs[I];
2034 if (SkipStores.find(Def) != SkipStores.end() ||
2035 !isRemovable(Def->getMemoryInst()))
2038 // TODO: Consider doing the underlying object check first, if it is
2039 // beneficial for compile time.
2040 if (isWriteAtEndOfFunction(Def)) {
2041 Instruction *DefI = Def->getMemoryInst();
2042 // See through pointer-to-pointer bitcasts
2043 SmallVector<const Value *, 4> Pointers;
2044 GetUnderlyingObjects(getLocForWriteEx(DefI)->Ptr, Pointers, DL);
2046 LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end "
2047 "of the function\n");
2048 bool CanKill = true;
2049 for (const Value *Pointer : Pointers) {
2050 if (!InvisibleToCallerAfterRet.count(Pointer)) {
2057 deleteDeadInstruction(DefI);
2066 /// \returns true if \p Def is a no-op store, either because it
2067 /// directly stores back a loaded value or stores zero to a calloced object.
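/// For example (illustrative IR), both stores below are no-ops, assuming
/// nothing else writes the locations in between:
///   %v = load i32, i32* %p
///   store i32 %v, i32* %p        ; stores back the value just loaded
///   %m = call i8* @calloc(i64 1, i64 8)
///   store i8 0, i8* %m           ; calloc memory is already zero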
2068 bool storeIsNoop(MemoryDef *Def, MemoryLocation DefLoc, const Value *DefUO) {
2069 StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst());
2073 if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
2074 if (LoadI->getPointerOperand() == Store->getOperand(1)) {
2075 auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
2076 // If both accesses share the same defining access, no instructions
2077 // between them can modify the memory location.
2078 return LoadAccess == Def->getDefiningAccess();
2082 Constant *StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
2083 if (StoredConstant && StoredConstant->isNullValue()) {
2084 auto *DefUOInst = dyn_cast<Instruction>(DefUO);
2085 if (DefUOInst && isCallocLikeFn(DefUOInst, &TLI)) {
2086 auto *UnderlyingDef = cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst));
2087 // If UnderlyingDef is the clobbering access of Def, no instructions
2088 // between them can modify the memory location.
2090 MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def);
2091 return UnderlyingDef == ClobberDef;
2098 bool eliminateDeadStoresMemorySSA(Function &F, AliasAnalysis &AA,
2099 MemorySSA &MSSA, DominatorTree &DT,
2100 PostDominatorTree &PDT,
2101 const TargetLibraryInfo &TLI) {
2102 const DataLayout &DL = F.getParent()->getDataLayout();
2103 bool MadeChange = false;
2105 DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI);
2107 for (unsigned I = 0; I < State.MemDefs.size(); I++) {
2108 MemoryDef *KillingDef = State.MemDefs[I];
2109 if (State.SkipStores.count(KillingDef))
2111 Instruction *SI = KillingDef->getMemoryInst();
2113 auto MaybeSILoc = State.getLocForWriteEx(SI);
2114 if (State.isMemTerminatorInst(SI))
2115 MaybeSILoc = State.getLocForTerminator(SI).map(
2116 [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
2118 MaybeSILoc = State.getLocForWriteEx(SI);
2121 LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
2125 MemoryLocation SILoc = *MaybeSILoc;
2126 assert(SILoc.Ptr && "SILoc should not be null");
2127 const Value *SILocUnd = GetUnderlyingObject(SILoc.Ptr, DL);
2129 // Check if the store is a no-op.
2130 if (isRemovable(SI) && State.storeIsNoop(KillingDef, SILoc, SILocUnd)) {
2131 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *SI << '\n');
2132 State.deleteDeadInstruction(SI);
2138 Instruction *DefObj =
2139 const_cast<Instruction *>(dyn_cast<Instruction>(SILocUnd));
2140 bool DefVisibleToCallerBeforeRet =
2141 !State.InvisibleToCallerBeforeRet.count(SILocUnd);
2142 bool DefVisibleToCallerAfterRet =
2143 !State.InvisibleToCallerAfterRet.count(SILocUnd);
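// For allocation calls, refine the visibility-before-return fact: it is
// sufficient that the object is not captured before the killing store SI
// executes, which is what PointerMayBeCapturedBefore checks below.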
2144 if (DefObj && isAllocLikeFn(DefObj, &TLI)) {
2145 if (DefVisibleToCallerBeforeRet)
2146 DefVisibleToCallerBeforeRet =
2147 PointerMayBeCapturedBefore(DefObj, false, true, SI, &DT);
2150 MemoryAccess *Current = KillingDef;
2151 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
2152 << *KillingDef << " (" << *SI << ")\n");
2154 int ScanLimit = MemorySSAScanLimit;
2155 // Worklist of MemoryAccesses that may be killed by KillingDef.
2156 SetVector<MemoryAccess *> ToCheck;
2157 ToCheck.insert(KillingDef->getDefiningAccess());
2159 // Check if MemoryAccesses in the worklist are killed by KillingDef.
2160 for (unsigned I = 0; I < ToCheck.size(); I++) {
2161 Current = ToCheck[I];
2162 if (State.SkipStores.count(Current))
2165 Optional<MemoryAccess *> Next = State.getDomMemoryDef(
2166 KillingDef, Current, SILoc, DefVisibleToCallerBeforeRet,
2167 DefVisibleToCallerAfterRet, ScanLimit);
2170 LLVM_DEBUG(dbgs() << " finished walk\n");
2174 MemoryAccess *DomAccess = *Next;
2175 LLVM_DEBUG(dbgs() << " Checking if we can kill " << *DomAccess);
2176 if (isa<MemoryPhi>(DomAccess)) {
2177 LLVM_DEBUG(dbgs() << "\n ... adding incoming values to worklist\n");
2178 for (Value *V : cast<MemoryPhi>(DomAccess)->incoming_values()) {
2179 MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
2180 BasicBlock *IncomingBlock = IncomingAccess->getBlock();
2181 BasicBlock *PhiBlock = DomAccess->getBlock();
2183 // We only consider incoming MemoryAccesses that come before the
2184 // MemoryPhi. Otherwise we could discover candidates that do not
2185 // strictly dominate our starting def.
2186 if (State.PostOrderNumbers[IncomingBlock] >
2187 State.PostOrderNumbers[PhiBlock])
2188 ToCheck.insert(IncomingAccess);
2192 MemoryDef *NextDef = dyn_cast<MemoryDef>(DomAccess);
2193 Instruction *NI = NextDef->getMemoryInst();
2194 LLVM_DEBUG(dbgs() << " (" << *NI << ")\n");
2196 // Before we try to remove anything, check for any extra throwing
2197 // instructions that block us from DSEing
2198 if (State.mayThrowBetween(SI, NI, SILocUnd)) {
2199 LLVM_DEBUG(dbgs() << " ... skip, may throw!\n");
2203 // Check for anything that looks like it will be a barrier to further
2204 // removal.
2205 if (State.isDSEBarrier(SILocUnd, NI)) {
2206 LLVM_DEBUG(dbgs() << " ... skip, barrier\n");
2210 ToCheck.insert(NextDef->getDefiningAccess());
2212 if (!hasAnalyzableMemoryWrite(NI, TLI)) {
2213 LLVM_DEBUG(dbgs() << " ... skip, cannot analyze def\n");
2217 if (!isRemovable(NI)) {
2218 LLVM_DEBUG(dbgs() << " ... skip, cannot remove def\n");
2222 if (!DebugCounter::shouldExecute(MemorySSACounter))
2225 MemoryLocation NILoc = *State.getLocForWriteEx(NI);
2227 if (State.isMemTerminatorInst(SI)) {
2228 const Value *NIUnd = GetUnderlyingObject(NILoc.Ptr, DL);
2229 if (!SILocUnd || SILocUnd != NIUnd)
2231 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI
2232 << "\n KILLER: " << *SI << '\n');
2233 State.deleteDeadInstruction(NI);
2237 // Check if SI overwrites NI.
2238 int64_t InstWriteOffset, DepWriteOffset;
2239 auto Iter = State.IOLs.insert(
2240 std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
2241 NI->getParent(), InstOverlapIntervalsTy()));
2242 auto &IOL = Iter.first->second;
2243 OverwriteResult OR = isOverwrite(SILoc, NILoc, DL, TLI, DepWriteOffset,
2244 InstWriteOffset, NI, IOL, AA, &F);
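// If the later store SI writes entirely within the region written by the
// earlier store NI (OW_PartialEarlierWithFullLater) and both stored values
// are constants, the helper below can merge them: NI's stored constant is
// updated to include SI's bytes and SI becomes redundant.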
2246 if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
2247 auto *Earlier = dyn_cast<StoreInst>(NI);
2248 auto *Later = dyn_cast<StoreInst>(SI);
2249 if (Constant *Merged = tryToMergePartialOverlappingStores(
2250 Earlier, Later, InstWriteOffset, DepWriteOffset, DL, &AA,
2253 // Update stored value of earlier store to merged constant.
2254 Earlier->setOperand(0, Merged);
2255 ++NumModifiedStores;
2258 // Remove later store and remove any outstanding overlap intervals
2259 // for the updated store.
2260 State.deleteDeadInstruction(Later);
2261 auto I = State.IOLs.find(Earlier->getParent());
2262 if (I != State.IOLs.end())
2263 I->second.erase(Earlier);
2268 if (OR == OW_Complete) {
2269 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI
2270 << "\n KILLER: " << *SI << '\n');
2271 State.deleteDeadInstruction(NI);
2279 if (EnablePartialOverwriteTracking)
2280 for (auto &KV : State.IOLs)
2281 MadeChange |= removePartiallyOverlappedStores(&AA, DL, KV.second);
2283 MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
2286 } // end anonymous namespace
2288 //===----------------------------------------------------------------------===//
2289 // DSE Pass
2290 //===----------------------------------------------------------------------===//
2291 PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
2292 AliasAnalysis &AA = AM.getResult<AAManager>(F);
2293 const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2294 DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
2296 bool Changed = false;
2297 if (EnableMemorySSA) {
2298 MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2299 PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
2301 Changed = eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, PDT, TLI);
2303 MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);
2305 Changed = eliminateDeadStores(F, &AA, &MD, &DT, &TLI);
2308 #ifdef LLVM_ENABLE_STATS
2309 if (AreStatisticsEnabled())
2310 for (auto &I : instructions(F))
2311 NumRemainingStores += isa<StoreInst>(&I);
2315 return PreservedAnalyses::all();
2317 PreservedAnalyses PA;
2318 PA.preserveSet<CFGAnalyses>();
2319 PA.preserve<GlobalsAA>();
2320 if (EnableMemorySSA)
2321 PA.preserve<MemorySSAAnalysis>();
2323 PA.preserve<MemoryDependenceAnalysis>();
2329 /// A pass for the legacy pass manager that wraps \c DSEPass.
2330 class DSELegacyPass : public FunctionPass {
2332 static char ID; // Pass identification, replacement for typeid
2334 DSELegacyPass() : FunctionPass(ID) {
2335 initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
2338 bool runOnFunction(Function &F) override {
2339 if (skipFunction(F))
2342 AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2343 DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2344 const TargetLibraryInfo &TLI =
2345 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2347 bool Changed = false;
2348 if (EnableMemorySSA) {
2349 MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2350 PostDominatorTree &PDT =
2351 getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
2353 Changed = eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, PDT, TLI);
2355 MemoryDependenceResults &MD =
2356 getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
2358 Changed = eliminateDeadStores(F, &AA, &MD, &DT, &TLI);
2361 #ifdef LLVM_ENABLE_STATS
2362 if (AreStatisticsEnabled())
2363 for (auto &I : instructions(F))
2364 NumRemainingStores += isa<StoreInst>(&I);
2370 void getAnalysisUsage(AnalysisUsage &AU) const override {
2371 AU.setPreservesCFG();
2372 AU.addRequired<AAResultsWrapperPass>();
2373 AU.addRequired<TargetLibraryInfoWrapperPass>();
2374 AU.addPreserved<GlobalsAAWrapperPass>();
2375 AU.addRequired<DominatorTreeWrapperPass>();
2376 AU.addPreserved<DominatorTreeWrapperPass>();
2378 if (EnableMemorySSA) {
2379 AU.addRequired<PostDominatorTreeWrapperPass>();
2380 AU.addRequired<MemorySSAWrapperPass>();
2381 AU.addPreserved<PostDominatorTreeWrapperPass>();
2382 AU.addPreserved<MemorySSAWrapperPass>();
2384 AU.addRequired<MemoryDependenceWrapperPass>();
2385 AU.addPreserved<MemoryDependenceWrapperPass>();
2390 } // end anonymous namespace
2392 char DSELegacyPass::ID = 0;
2394 INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
2396 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2397 INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
2398 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2399 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
2400 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
2401 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
2402 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2403 INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
2406 FunctionPass *llvm::createDeadStoreEliminationPass() {
2407 return new DSELegacyPass();