//===- DeadStoreElimination.cpp - Fast Dead Store Elimination ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <map>

using namespace llvm;

#define DEBUG_TYPE "dse"
STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");

static cl::opt<bool>
EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial-overwrite tracking in DSE"));

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
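// For each store instruction, an interval map records which byte ranges of it
// later stores have overwritten: the key is an interval's end offset (in the
// half-open sense) and the value is its start offset.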
typedef std::map<int64_t, int64_t> OverlapIntervalsTy;
typedef DenseMap<Instruction *, OverlapIntervalsTy> InstOverlapIntervalsTy;

/// Delete this instruction.  Before we do, go through and zero out all the
/// operands of this instruction.  If any of them become dead, delete them and
/// the computation tree that feeds them.
/// If ValueSet is non-null, remove any deleted instructions from it as well.
static void
deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
                      MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
                      InstOverlapIntervalsTy &IOL,
                      DenseMap<Instruction*, size_t> *InstrOrdering,
                      SmallSetVector<Value *, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Keeping the iterator straight is a pain, so we let this routine tell the
  // caller what the next instruction is after we're done mucking about.
  BasicBlock::iterator NewIter = *BBI;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    if (ValueSet) ValueSet->remove(DeadInst);
    InstrOrdering->erase(DeadInst);
    IOL.erase(DeadInst);

    if (NewIter == DeadInst->getIterator())
      NewIter = DeadInst->eraseFromParent();
    else
      DeadInst->eraseFromParent();
  } while (!NowDeadInsts.empty());
  *BBI = NewIter;
}

/// Does this instruction write some memory?  This only returns true for things
/// that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      StringRef FnName = F->getName();
      if (TLI.has(LibFunc::strcpy) && FnName == TLI.getName(LibFunc::strcpy))
        return true;
      if (TLI.has(LibFunc::strncpy) && FnName == TLI.getName(LibFunc::strncpy))
        return true;
      if (TLI.has(LibFunc::strcat) && FnName == TLI.getName(LibFunc::strcat))
        return true;
      if (TLI.has(LibFunc::strncat) && FnName == TLI.getName(LibFunc::strncat))
        return true;
    }
  }
  return false;
}

/// Return a Location stored to by the specified instruction. If isRemovable
/// returns true, this function and getLocForRead completely describe the memory
/// operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II)
    return MemoryLocation();

  switch (II->getIntrinsicID()) {
  default:
    return MemoryLocation(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return MemoryLocation(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return MemoryLocation(II->getArgOperand(1), Len);
  }
  }
}

/// Return the location read by the specified "hasMemoryWrite" instruction if
/// any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}

/// If the value of this instruction and the memory it writes to is unused, may
/// we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;

    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    }
  }

  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}

/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
  // Don't shorten stores for now.
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
      default: return false;
      case Intrinsic::memset:
      case Intrinsic::memcpy:
        // Do shorten memory intrinsics.
        // FIXME: Add memmove if it's also safe to transform.
        return true;
    }
  }

  // Don't shorten libcalls for now.

  return false;
}

/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
static bool isShortenableAtTheBeginning(Instruction *I) {
  // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
  // easily done by offsetting the source address.
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
  return II && II->getIntrinsicID() == Intrinsic::memset;
}

/// Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::init_trampoline:
      return II->getArgOperand(0);
    }
  }

  CallSite CS(I);
  // All the supported functions so far happen to have dest as their first
  // argument.
  return CS.getArgument(0);
}

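/// Return the size in bytes of the object that V points to, or
/// MemoryLocation::UnknownSize if it cannot be determined.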
static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI))
    return Size;
  return MemoryLocation::UnknownSize;
}

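// Classification of how a later store overlaps the earlier store it depends
// on; computed by isOverwrite() below.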
namespace {
enum OverwriteResult {
  OverwriteBegin,
  OverwriteComplete,
  OverwriteEnd,
  OverwriteUnknown
};
}

/// Return 'OverwriteComplete' if a store to the 'Later' location completely
/// overwrites a store to the 'Earlier' location, 'OverwriteEnd' if the end of
/// the 'Earlier' location is completely overwritten by 'Later',
/// 'OverwriteBegin' if the beginning of the 'Earlier' location is overwritten
/// by 'Later', or 'OverwriteUnknown' if nothing can be determined.
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff,
                                   Instruction *DepWrite,
                                   InstOverlapIntervalsTy &IOL) {
  // If we don't know the sizes of either access, then we can't do a comparison.
  if (Later.Size == MemoryLocation::UnknownSize ||
      Earlier.Size == MemoryLocation::UnknownSize)
    return OverwriteUnknown;

  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2) {
    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OverwriteComplete;
  }

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OverwriteUnknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OverwriteComplete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return OverwriteUnknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OverwriteComplete;

  // We may now overlap, although the overlap is not complete. There might also
  // be other incomplete overlaps, and together, they might cover the complete
  // earlier write.
  // Note: The correctness of this logic depends on the fact that this function
  // is never called with DepWrite when there are any intervening reads.
  if (EnablePartialOverwriteTracking &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= EarlierOff) {

    // Insert our part of the overlap into the map.
    auto &IM = IOL[DepWrite];
    DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff << ", " <<
                    int64_t(EarlierOff + Earlier.Size) << ") Later [" <<
                    LaterOff << ", " << int64_t(LaterOff + Later.Size) << ")\n");

    // Make sure that we only insert non-overlapping intervals and combine
    // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key (in the half-open sense) and the starting offset as
    // the value.
    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + Later.Size;

    // Find any intervals ending at, or after, LaterIntStart which start
    // before LaterIntEnd.
    auto ILI = IM.lower_bound(LaterIntStart);
    if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
      // This existing interval is overlapped with the current store somewhere
      // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
      // intervals and adjusting our start and end.
      LaterIntStart = std::min(LaterIntStart, ILI->second);
      LaterIntEnd = std::max(LaterIntEnd, ILI->first);
      ILI = IM.erase(ILI);

      // Continue erasing and adjusting our end in case other previous
      // intervals are also overlapped with the current store.
      //
      // |--- earlier 1 ---|  |--- earlier 2 ---|
      //     |------- later---------|
      //
      while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
        assert(ILI->second > LaterIntStart && "Unexpected interval");
        LaterIntEnd = std::max(LaterIntEnd, ILI->first);
        ILI = IM.erase(ILI);
      }
    }

    IM[LaterIntEnd] = LaterIntStart;

    ILI = IM.begin();
    if (ILI->second <= EarlierOff &&
        ILI->first >= int64_t(EarlierOff + Earlier.Size)) {
      DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier [" <<
                      EarlierOff << ", " <<
                      int64_t(EarlierOff + Earlier.Size) <<
                      ") Composite Later [" <<
                      ILI->second << ", " << ILI->first << ")\n");
      ++NumCompletePartials;
      return OverwriteComplete;
    }
  }

  // Another interesting case is if the later store overwrites the end of the
  // earlier store.
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + Earlier.Size) &&
       int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size)))
    return OverwriteEnd;

  // Finally, we also need to check if the later store overwrites the beginning
  // of the earlier store.
  //
  //                |--earlier--|
  //      |--   later   --|
  //
  // In this case we may want to move the destination address and trim the size
  // of earlier to avoid generating writes to addresses which will definitely
  // be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff <= EarlierOff && int64_t(LaterOff + Later.Size) > EarlierOff)) {
    assert(int64_t(LaterOff + Later.Size) <
               int64_t(EarlierOff + Earlier.Size) &&
           "Expect to be handled as OverwriteComplete");
    return OverwriteBegin;
  }
  // Otherwise, they don't completely overlap.
  return OverwriteUnknown;
}

/// If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= #
  // bytes as the second one).
  MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return false;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}

/// Returns true if the memory which is accessed by the second instruction is
/// not modified between the first and the second instruction.
/// Precondition: Second instruction must be dominated by the first
/// instruction.
static bool memoryIsNotModifiedBetween(Instruction *FirstI,
                                       Instruction *SecondI,
                                       AliasAnalysis *AA) {
  SmallVector<BasicBlock *, 16> WorkList;
  SmallPtrSet<BasicBlock *, 8> Visited;
  BasicBlock::iterator FirstBBI(FirstI);
  ++FirstBBI;
  BasicBlock::iterator SecondBBI(SecondI);
  BasicBlock *FirstBB = FirstI->getParent();
  BasicBlock *SecondBB = SecondI->getParent();
  MemoryLocation MemLoc = MemoryLocation::get(SecondI);

  // Start checking the store-block.
  WorkList.push_back(SecondBB);
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the load-block.
  while (!WorkList.empty()) {
    BasicBlock *B = WorkList.pop_back_val();

    // Ignore instructions before LI if this is the FirstBB.
    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());

    BasicBlock::iterator EI;
    if (isFirstBlock) {
      // Ignore instructions after SI if this is the first visit of SecondBB.
      assert(B == SecondBB && "first block is not the store block");
      EI = SecondBBI;
      isFirstBlock = false;
    } else {
      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
      // In this case we also have to look at instructions after SI.
      EI = B->end();
    }
    for (; BI != EI; ++BI) {
      Instruction *I = &*BI;
      if (I->mayWriteToMemory() && I != SecondI) {
        auto Res = AA->getModRefInfo(I, MemLoc);
        if (Res != MRI_NoModRef)
          return false;
      }
    }
    if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
          "Should not hit the entry block because SI must be dominated by LI");
      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
        if (!Visited.insert(*PredI).second)
          continue;
        WorkList.push_back(*PredI);
      }
    }
  }
  return true;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
static bool handleFree(CallInst *F, AliasAnalysis *AA,
                       MemoryDependenceResults *MD, DominatorTree *DT,
                       const TargetLibraryInfo *TLI,
                       InstOverlapIntervalsTy &IOL,
                       DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep =
        MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasMemoryWrite(Dependency, *TLI) || !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      DEBUG(dbgs() << "DSE: Dead Store to soon to be freed memory:\n  DEAD: "
                   << *Dependency << '\n');

      // DCE instructions only used to calculate that store.
      BasicBlock::iterator BBI(Dependency);
      deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted. Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
    }

    if (Dep.isNonLocal())
      findUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// Check to see if the specified location may alias any of the stack objects in
/// the DeadStackObjects set. If so, they become live because the location is
/// being loaded.
static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
                                  SmallSetVector<Value *, 16> &DeadStackObjects,
                                  const DataLayout &DL, AliasAnalysis *AA,
                                  const TargetLibraryInfo *TLI) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}

/// Remove dead stores to stack-allocated locations in the function end block.
/// Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
                           MemoryDependenceResults *MD,
                           const TargetLibraryInfo *TLI,
                           InstOverlapIntervalsTy &IOL,
                           DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock &Entry = BB.getParent()->front();
  for (Instruction &I : Entry) {
    if (isa<AllocaInst>(&I))
      DeadStackObjects.insert(&I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
      DeadStackObjects.insert(&I);
  }

  // Treat byval or inalloca arguments the same; stores to them are dead at the
  // end of the function.
  for (Argument &AI : BB.getParent()->args())
    if (AI.hasByValOrInAllocaAttr())
      DeadStackObjects.insert(&AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (Value *Pointer : Pointers)
        if (!DeadStackObjects.count(Pointer)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = &*BBI;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, InstrOrdering,
                              &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(&*BBI, TLI)) {
      DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
                   << *&*BBI << '\n');
      deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, InstrOrdering,
                            &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(&*BBI);
      continue;
    }

    if (auto CS = CallSite(&*BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(&*BBI, TLI))
        DeadStackObjects.remove(&*BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        ModRefInfo A = AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI));

        return A == MRI_ModRef || A == MRI_Ref;
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    // We can remove the dead stores, irrespective of the fence and its ordering
    // (release/acquire/seq_cst). Fences only constrain the ordering of
    // already visible stores; they do not make a store visible to other
    // threads. So, skipping over a fence does not change a store from being
    // dead.
    if (isa<FenceInst>(*BBI))
      continue;

    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load.
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = MemoryLocation::getForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

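/// Shorten the memory intrinsic 'EarlierWrite' so that it no longer writes
/// bytes that a later store will overwrite anyway: trim the length and, when
/// trimming at the beginning, advance the destination pointer. Updates
/// EarlierOffset and EarlierSize in place; returns true if the intrinsic was
/// changed.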
static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
                         int64_t &EarlierSize, int64_t LaterOffset,
                         int64_t LaterSize, bool IsOverwriteEnd) {
  // TODO: base this on the target vector size so that if the earlier
  // store was too small to get vector writes anyway then it's likely
  // a good idea to shorten it.
  // Power-of-2 vector writes are probably always a bad idea to optimize,
  // as any store/memset/memcpy is likely using vector instructions, so
  // shortening it to not vector size is likely to be slower.
  MemIntrinsic *EarlierIntrinsic = cast<MemIntrinsic>(EarlierWrite);
  unsigned EarlierWriteAlign = EarlierIntrinsic->getAlignment();
  if (!IsOverwriteEnd)
    LaterOffset = int64_t(LaterOffset + LaterSize);

  if (!(llvm::isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
      !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
    return false;

  DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
               << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *EarlierWrite
               << "\n  KILLER (offset " << LaterOffset << ", " << EarlierSize
               << ")\n");

  int64_t NewLength = IsOverwriteEnd
                          ? LaterOffset - EarlierOffset
                          : EarlierSize - (LaterOffset - EarlierOffset);

  Value *EarlierWriteLength = EarlierIntrinsic->getLength();
  Value *TrimmedLength =
      ConstantInt::get(EarlierWriteLength->getType(), NewLength);
  EarlierIntrinsic->setLength(TrimmedLength);

  EarlierSize = NewLength;
  if (!IsOverwriteEnd) {
    int64_t OffsetMoved = (LaterOffset - EarlierOffset);
    Value *Indices[1] = {
        ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
    GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
        EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
    EarlierIntrinsic->setDest(NewDestGEP);
    EarlierOffset = EarlierOffset + OffsetMoved;
  }
  return true;
}

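/// Try to trim the end of 'EarlierWrite' using the highest interval in
/// IntervalMap, i.e. when later stores have overwritten the tail of the
/// earlier one.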
static bool tryToShortenEnd(Instruction *EarlierWrite,
                            OverlapIntervalsTy &IntervalMap,
                            int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
      LaterStart + LaterSize >= EarlierStart + EarlierSize) {
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

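/// Try to trim the beginning of 'EarlierWrite' using the lowest interval in
/// IntervalMap, i.e. when later stores have overwritten the head of the
/// earlier one.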
static bool tryToShortenBegin(Instruction *EarlierWrite,
                              OverlapIntervalsTy &IntervalMap,
                              int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
    assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
           "Should have been handled as OverwriteComplete");
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

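/// For each store with recorded overlap intervals, try to shorten it at the
/// end and then at the beginning. Returns true if any store was changed.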
static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
                                            const DataLayout &DL,
                                            InstOverlapIntervalsTy &IOL) {
  bool Changed = false;
  for (auto OI : IOL) {
    Instruction *EarlierWrite = OI.first;
    MemoryLocation Loc = getLocForWrite(EarlierWrite, *AA);
    assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
    assert(Loc.Size != MemoryLocation::UnknownSize && "Unexpected mem loc");

    const Value *Ptr = Loc.Ptr->stripPointerCasts();
    int64_t EarlierStart = 0;
    int64_t EarlierSize = int64_t(Loc.Size);
    GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
    OverlapIntervalsTy &IntervalMap = OI.second;
    Changed |=
        tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
    if (IntervalMap.empty())
      continue;
    Changed |=
        tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
  }
  return Changed;
}

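/// Remove no-op stores: storing a value that was just loaded from the same
/// pointer, or storing zero into memory obtained from a calloc-like call,
/// provided the memory is not modified in between.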
static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
                               AliasAnalysis *AA, MemoryDependenceResults *MD,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI,
                               InstOverlapIntervalsTy &IOL,
                               DenseMap<Instruction*, size_t> *InstrOrdering) {
  // Must be a store instruction.
  StoreInst *SI = dyn_cast<StoreInst>(Inst);
  if (!SI)
    return false;

  // If we're storing the same value back to a pointer that we just loaded from,
  // then the store can be removed.
  if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
    if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
        isRemovable(SI) && memoryIsNotModifiedBetween(DepLoad, SI, AA)) {

      DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
                   << *DepLoad << "\n  STORE: " << *SI << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }

  // Remove null stores into the calloc'ed objects.
  Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
  if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
    Instruction *UnderlyingPointer =
        dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));

    if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
        memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA)) {
      DEBUG(
          dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
                 << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }
  return false;
}

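/// Walk the basic block top-down, removing stores that are dead because a
/// later store completely overwrites them, handling 'free' calls and no-op
/// stores along the way, and cleaning up stores to dead stack objects in
/// blocks that end the function.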
static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  const DataLayout &DL = BB.getModule()->getDataLayout();
  bool MadeChange = false;

  // FIXME: Maybe change this to use some abstraction like OrderedBasicBlock?
  // The current OrderedBasicBlock can't deal with mutation at the moment.
  size_t LastThrowingInstIndex = 0;
  DenseMap<Instruction*, size_t> InstrOrdering;
  size_t InstrIndex = 1;

  // A map of interval maps representing partially-overwritten value parts.
  InstOverlapIntervalsTy IOL;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(&*BBI, TLI)) {
      MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, &InstrOrdering);
      // Increment BBI after handleFree has potentially deleted instructions.
      // This ensures we maintain a valid iterator.
      ++BBI;
      continue;
    }

    Instruction *Inst = &*BBI++;

    size_t CurInstNumber = InstrIndex++;
    InstrOrdering.insert(std::make_pair(Inst, CurInstNumber));
    if (Inst->mayThrow()) {
      LastThrowingInstIndex = CurInstNumber;
      continue;
    }

    // Check to see if Inst writes to memory.  If not, continue.
    if (!hasMemoryWrite(Inst, *TLI))
      continue;

    // eliminateNoopStore will update the iterator, if necessary.
    if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL, &InstrOrdering)) {
      MadeChange = true;
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    // Loop until we find a store we can eliminate or a load that
    // invalidates the analysis. Without an upper bound on the number of
    // instructions examined, this analysis can become very time-consuming.
    // However, the potential gain diminishes as we process more instructions
    // without eliminating any of them. Therefore, we limit the number of
    // instructions we look at.
    auto Limit = MD->getDefaultBlockScanLimit();
    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, or if it isn't a size, bail out.
      if (!DepLoc.Ptr)
        break;

      // Make sure we don't look past a call which might throw. This is an
      // issue because MemoryDependenceAnalysis works in the wrong direction:
      // it finds instructions which dominate the current instruction, rather
      // than instructions which are post-dominated by the current instruction.
      //
      // If the underlying object is a non-escaping memory allocation, any store
      // to it is dead along the unwind edge. Otherwise, we need to preserve
      // the store.
      size_t DepIndex = InstrOrdering.lookup(DepWrite);
      assert(DepIndex && "Unexpected instruction");
      if (DepIndex <= LastThrowingInstIndex) {
        const Value* Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
        bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
        if (!IsStoreDeadOnUnwind) {
          // We're looking for a call to an allocation function
          // where the allocation doesn't escape before the last
          // throwing instruction; PointerMayBeCaptured is a
          // reasonably fast approximation.
          IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
              !PointerMayBeCaptured(Underlying, false, true);
        }
        if (!IsStoreDeadOnUnwind)
          break;
      }

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know that
      // 'Inst' doesn't load from, then we can remove it.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR =
            isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset, InstWriteOffset,
                        DepWrite, IOL);
        if (OR == OverwriteComplete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                       << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, &InstrOrdering);
          ++NumFastStores;
          MadeChange = true;

          // We erased DepWrite; start over.
          InstDep = MD->getDependency(Inst);
          continue;
        } else if ((OR == OverwriteEnd && isShortenableAtTheEnd(DepWrite)) ||
                   ((OR == OverwriteBegin &&
                     isShortenableAtTheBeginning(DepWrite)))) {
          assert(!EnablePartialOverwriteTracking &&
                 "Do not expect to perform shortening when partial-overwrite "
                 "tracking is enabled");
          int64_t EarlierSize = DepLoc.Size;
          int64_t LaterSize = Loc.Size;
          bool IsOverwriteEnd = (OR == OverwriteEnd);
          MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
                                     InstWriteOffset, LaterSize, IsOverwriteEnd);
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & MRI_Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
                                             DepWrite->getIterator(), &BB,
                                             /*QueryInst=*/ nullptr, &Limit);
    }
  }

  if (EnablePartialOverwriteTracking)
    MadeChange |= removePartiallyOverlappedStores(AA, DL, IOL);

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, &InstrOrdering);

  return MadeChange;
}

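/// Run dead store elimination on every basic block in F that is reachable
/// from the entry block.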
static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  for (BasicBlock &BB : F)
    // Only check non-dead blocks.  Dead blocks may have strange pointer
    // cycles that will confuse alias analysis.
    if (DT->isReachableFromEntry(&BB))
      MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);

  return MadeChange;
}

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis *AA = &AM.getResult<AAManager>(F);
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  MemoryDependenceResults *MD = &AM.getResult<MemoryDependenceAnalysis>(F);
  const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);

  if (!eliminateDeadStores(F, AA, MD, DT, TLI))
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

namespace {
/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    MemoryDependenceResults *MD =
        &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

    return eliminateDeadStores(F, AA, MD, DT, TLI);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }

  static char ID; // Pass identification, replacement for typeid
};
} // end anonymous namespace

char DSELegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}