1 //===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // The code below implements dead store elimination using MemorySSA. It uses
10 // the following general approach: given a MemoryDef, walk upwards to find
11 // clobbering MemoryDefs that may be killed by the starting def. Then check
12 // that there are no uses that may read the location of the original MemoryDef
13 // in between both MemoryDefs. A bit more concretely:
15 // For all MemoryDefs StartDef:
// 1. Get the next dominating clobbering MemoryDef (MaybeDeadAccess) by
//    walking upwards from StartDef.
// 2. Check that there are no reads between MaybeDeadAccess and the StartDef by
//    checking all uses starting at MaybeDeadAccess and walking until we see
//    StartDef.
21 // 3. For each found CurrentDef, check that:
22 // 1. There are no barrier instructions between CurrentDef and StartDef (like
23 // throws or stores with ordering constraints).
24 // 2. StartDef is executed whenever CurrentDef is executed.
25 // 3. StartDef completely overwrites CurrentDef.
26 // 4. Erase CurrentDef from the function and MemorySSA.
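//
// For example (illustrative IR only):
//
//   store i32 0, i32* %p   ; dead: clobbered below, with no reads in between
//   store i32 1, i32* %p   ; killing StartDef
//
// Here the first store is erased from the function and MemorySSA.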
28 //===----------------------------------------------------------------------===//
30 #include "llvm/Transforms/Scalar/DeadStoreElimination.h"
31 #include "llvm/ADT/APInt.h"
32 #include "llvm/ADT/DenseMap.h"
33 #include "llvm/ADT/MapVector.h"
34 #include "llvm/ADT/PostOrderIterator.h"
35 #include "llvm/ADT/SetVector.h"
36 #include "llvm/ADT/SmallPtrSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Statistic.h"
39 #include "llvm/ADT/StringRef.h"
40 #include "llvm/Analysis/AliasAnalysis.h"
41 #include "llvm/Analysis/CaptureTracking.h"
42 #include "llvm/Analysis/GlobalsModRef.h"
43 #include "llvm/Analysis/LoopInfo.h"
44 #include "llvm/Analysis/MemoryBuiltins.h"
45 #include "llvm/Analysis/MemoryLocation.h"
46 #include "llvm/Analysis/MemorySSA.h"
47 #include "llvm/Analysis/MemorySSAUpdater.h"
48 #include "llvm/Analysis/MustExecute.h"
49 #include "llvm/Analysis/PostDominators.h"
50 #include "llvm/Analysis/TargetLibraryInfo.h"
51 #include "llvm/Analysis/ValueTracking.h"
52 #include "llvm/IR/Argument.h"
53 #include "llvm/IR/BasicBlock.h"
54 #include "llvm/IR/Constant.h"
55 #include "llvm/IR/Constants.h"
56 #include "llvm/IR/DataLayout.h"
57 #include "llvm/IR/Dominators.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/IRBuilder.h"
60 #include "llvm/IR/InstIterator.h"
61 #include "llvm/IR/InstrTypes.h"
62 #include "llvm/IR/Instruction.h"
63 #include "llvm/IR/Instructions.h"
64 #include "llvm/IR/IntrinsicInst.h"
65 #include "llvm/IR/Intrinsics.h"
66 #include "llvm/IR/LLVMContext.h"
67 #include "llvm/IR/Module.h"
68 #include "llvm/IR/PassManager.h"
69 #include "llvm/IR/PatternMatch.h"
70 #include "llvm/IR/Value.h"
71 #include "llvm/InitializePasses.h"
72 #include "llvm/Pass.h"
73 #include "llvm/Support/Casting.h"
74 #include "llvm/Support/CommandLine.h"
75 #include "llvm/Support/Debug.h"
76 #include "llvm/Support/DebugCounter.h"
77 #include "llvm/Support/ErrorHandling.h"
78 #include "llvm/Support/MathExtras.h"
79 #include "llvm/Support/raw_ostream.h"
80 #include "llvm/Transforms/Scalar.h"
81 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
82 #include "llvm/Transforms/Utils/BuildLibCalls.h"
83 #include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;
95 #define DEBUG_TYPE "dse"
97 STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
98 STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
99 STATISTIC(NumFastStores, "Number of stores deleted");
100 STATISTIC(NumFastOther, "Number of other instrs removed");
101 STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
102 STATISTIC(NumModifiedStores, "Number of stores modified");
STATISTIC(NumCFGChecks, "Number of blocks checked in CFG walks");
STATISTIC(NumCFGTries, "Number of CFG walks attempted");
STATISTIC(NumCFGSuccess, "Number of CFG walks that proved a store dead");
106 STATISTIC(NumGetDomMemoryDefPassed,
107 "Number of times a valid candidate is returned from getDomMemoryDef");
108 STATISTIC(NumDomMemDefChecks,
109 "Number iterations check for reads in getDomMemoryDef");
111 DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
112 "Controls which MemoryDefs are eliminated.");
static cl::opt<bool>
    EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
116 cl::init(true), cl::Hidden,
117 cl::desc("Enable partial-overwrite tracking in DSE"));
static cl::opt<bool>
    EnablePartialStoreMerging("enable-dse-partial-store-merging",
121 cl::init(true), cl::Hidden,
122 cl::desc("Enable partial store merging in DSE"));
124 static cl::opt<unsigned>
125 MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
126 cl::desc("The number of memory instructions to scan for "
127 "dead store elimination (default = 150)"));
128 static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
129 "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
130 cl::desc("The maximum number of steps while walking upwards to find "
131 "MemoryDefs that may be killed (default = 90)"));
133 static cl::opt<unsigned> MemorySSAPartialStoreLimit(
134 "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
135 cl::desc("The maximum number candidates that only partially overwrite the "
136 "killing MemoryDef to consider"
139 static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
140 "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
141 cl::desc("The number of MemoryDefs we consider as candidates to eliminated "
142 "other stores per basic block (default = 5000)"));
144 static cl::opt<unsigned> MemorySSASameBBStepCost(
145 "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
147 "The cost of a step in the same basic block as the killing MemoryDef"
150 static cl::opt<unsigned>
151 MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
153 cl::desc("The cost of a step in a different basic "
154 "block than the killing MemoryDef"
157 static cl::opt<unsigned> MemorySSAPathCheckLimit(
158 "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
159 cl::desc("The maximum number of blocks to check when trying to prove that "
160 "all paths to an exit go through a killing block (default = 50)"));
// This flag allows or disallows DSE to optimize MemorySSA during its
163 // traversal. Note that DSE optimizing MemorySSA may impact other passes
164 // downstream of the DSE invocation and can lead to issues not being
165 // reproducible in isolation (i.e. when MemorySSA is built from scratch). In
166 // those cases, the flag can be used to check if DSE's MemorySSA optimizations
167 // impact follow-up passes.
static cl::opt<bool>
    OptimizeMemorySSA("dse-optimize-memoryssa", cl::init(true), cl::Hidden,
170 cl::desc("Allow DSE to optimize memory accesses."));
172 //===----------------------------------------------------------------------===//
174 //===----------------------------------------------------------------------===//
175 using OverlapIntervalsTy = std::map<int64_t, int64_t>;
176 using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
/// Returns true if the end of this instruction can be safely shortened in
/// length.
180 static bool isShortenableAtTheEnd(Instruction *I) {
181 // Don't shorten stores for now
  if (isa<StoreInst>(I))
    return false;
185 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
186 switch (II->getIntrinsicID()) {
187 default: return false;
188 case Intrinsic::memset:
189 case Intrinsic::memcpy:
190 case Intrinsic::memcpy_element_unordered_atomic:
191 case Intrinsic::memset_element_unordered_atomic:
192 // Do shorten memory intrinsics.
      // FIXME: Add memmove if it's also safe to transform.
      return true;
    }
  }

  // Don't shorten libcall calls for now.
  return false;
}
/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
205 static bool isShortenableAtTheBeginning(Instruction *I) {
206 // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
207 // easily done by offsetting the source address.
  return isa<AnyMemSetInst>(I);
}
211 static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI,
                               const Function *F) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.NullIsUnknownSize = NullPointerIsDefined(F);

  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}
namespace {

enum OverwriteResult {
  OW_Begin,
  OW_Complete,
  OW_End,
  OW_PartialEarlierWithFullLater,
  OW_MaybePartial,
  OW_None,
  OW_Unknown
};
235 } // end anonymous namespace
237 /// Check if two instruction are masked stores that completely
238 /// overwrite one another. More specifically, \p KillingI has to
239 /// overwrite \p DeadI.
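/// For example (illustrative), with identical pointers and masks:
///   call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v1, <4 x i32>* %p,
///                                              i32 4, <4 x i1> %mask)
///   call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v2, <4 x i32>* %p,
///                                              i32 4, <4 x i1> %mask)
/// the first masked store is completely overwritten by the second.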
240 static OverwriteResult isMaskedStoreOverwrite(const Instruction *KillingI,
241 const Instruction *DeadI,
242 BatchAAResults &AA) {
243 const auto *KillingII = dyn_cast<IntrinsicInst>(KillingI);
244 const auto *DeadII = dyn_cast<IntrinsicInst>(DeadI);
  if (KillingII == nullptr || DeadII == nullptr)
    return OW_Unknown;
247 if (KillingII->getIntrinsicID() != Intrinsic::masked_store ||
      DeadII->getIntrinsicID() != Intrinsic::masked_store)
    return OW_Unknown;
251 Value *KillingPtr = KillingII->getArgOperand(1)->stripPointerCasts();
252 Value *DeadPtr = DeadII->getArgOperand(1)->stripPointerCasts();
  if (KillingPtr != DeadPtr && !AA.isMustAlias(KillingPtr, DeadPtr))
    return OW_Unknown;
256 // TODO: check that KillingII's mask is a superset of the DeadII's mask.
  if (KillingII->getArgOperand(3) != DeadII->getArgOperand(3))
    return OW_Unknown;
  return OW_Complete;
}
262 /// Return 'OW_Complete' if a store to the 'KillingLoc' location completely
263 /// overwrites a store to the 'DeadLoc' location, 'OW_End' if the end of the
264 /// 'DeadLoc' location is completely overwritten by 'KillingLoc', 'OW_Begin'
265 /// if the beginning of the 'DeadLoc' location is overwritten by 'KillingLoc'.
266 /// 'OW_PartialEarlierWithFullLater' means that a dead (big) store was
267 /// overwritten by a killing (smaller) store which doesn't write outside the big
268 /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
269 /// NOTE: This function must only be called if both \p KillingLoc and \p
/// DeadLoc belong to the same underlying object with valid \p KillingOff and
/// \p DeadOff offsets.
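///
/// For example (illustrative):
///   store i64 0, i64* %p   ; DeadLoc, bytes [0, 8)
///   %q = bitcast i64* %p to i8*
///   store i8 1, i8* %q     ; KillingLoc, bytes [0, 1)
/// yields OW_PartialEarlierWithFullLater, because the killing store is fully
/// contained in the dead store's memory locations.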
272 static OverwriteResult isPartialOverwrite(const MemoryLocation &KillingLoc,
273 const MemoryLocation &DeadLoc,
274 int64_t KillingOff, int64_t DeadOff,
                                          Instruction *DeadI,
                                          InstOverlapIntervalsTy &IOL) {
277 const uint64_t KillingSize = KillingLoc.Size.getValue();
278 const uint64_t DeadSize = DeadLoc.Size.getValue();
279 // We may now overlap, although the overlap is not complete. There might also
  // be other incomplete overlaps, and together, they might cover the complete
  // dead store.
  // Note: The correctness of this logic depends on this function not being
  // called when there are any intervening reads of the dead location.
284 if (EnablePartialOverwriteTracking &&
285 KillingOff < int64_t(DeadOff + DeadSize) &&
286 int64_t(KillingOff + KillingSize) >= DeadOff) {
288 // Insert our part of the overlap into the map.
289 auto &IM = IOL[DeadI];
290 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: DeadLoc [" << DeadOff << ", "
291 << int64_t(DeadOff + DeadSize) << ") KillingLoc ["
                      << KillingOff << ", " << int64_t(KillingOff + KillingSize)
                      << ")\n");
295 // Make sure that we only insert non-overlapping intervals and combine
296 // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key (in the half-open sense) and the starting offset as
    // the value.
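    // E.g. (illustrative) a killing write of bytes [4, 12) is recorded as
    // IM[12] = 4.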
299 int64_t KillingIntStart = KillingOff;
300 int64_t KillingIntEnd = KillingOff + KillingSize;
302 // Find any intervals ending at, or after, KillingIntStart which start
303 // before KillingIntEnd.
304 auto ILI = IM.lower_bound(KillingIntStart);
305 if (ILI != IM.end() && ILI->second <= KillingIntEnd) {
306 // This existing interval is overlapped with the current store somewhere
307 // in [KillingIntStart, KillingIntEnd]. Merge them by erasing the existing
308 // intervals and adjusting our start and end.
309 KillingIntStart = std::min(KillingIntStart, ILI->second);
      KillingIntEnd = std::max(KillingIntEnd, ILI->first);
      ILI = IM.erase(ILI);
313 // Continue erasing and adjusting our end in case other previous
314 // intervals are also overlapped with the current store.
316 // |--- dead 1 ---| |--- dead 2 ---|
317 // |------- killing---------|
319 while (ILI != IM.end() && ILI->second <= KillingIntEnd) {
320 assert(ILI->second > KillingIntStart && "Unexpected interval");
        KillingIntEnd = std::max(KillingIntEnd, ILI->first);
        ILI = IM.erase(ILI);
      }
    }
326 IM[KillingIntEnd] = KillingIntStart;
    ILI = IM.begin();
    if (ILI->second <= DeadOff && ILI->first >= int64_t(DeadOff + DeadSize)) {
330 LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: DeadLoc ["
331 << DeadOff << ", " << int64_t(DeadOff + DeadSize)
332 << ") Composite KillingLoc [" << ILI->second << ", "
333 << ILI->first << ")\n");
      ++NumCompletePartials;
      return OW_Complete;
    }
  }
339 // Check for a dead store which writes to all the memory locations that
340 // the killing store writes to.
341 if (EnablePartialStoreMerging && KillingOff >= DeadOff &&
342 int64_t(DeadOff + DeadSize) > KillingOff &&
343 uint64_t(KillingOff - DeadOff) + KillingSize <= DeadSize) {
344 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite a dead load [" << DeadOff
345 << ", " << int64_t(DeadOff + DeadSize)
346 << ") by a killing store [" << KillingOff << ", "
347 << int64_t(KillingOff + KillingSize) << ")\n");
348 // TODO: Maybe come up with a better name?
    return OW_PartialEarlierWithFullLater;
  }
  // Another interesting case is if the killing store overwrites the end of the
  // dead store.
  //
  // In this case we may want to trim the size of the dead store to avoid
  // generating stores to addresses which will definitely be overwritten by the
  // killing store.
361 if (!EnablePartialOverwriteTracking &&
362 (KillingOff > DeadOff && KillingOff < int64_t(DeadOff + DeadSize) &&
       int64_t(KillingOff + KillingSize) >= int64_t(DeadOff + DeadSize)))
    return OW_End;
366 // Finally, we also need to check if the killing store overwrites the
367 // beginning of the dead store.
  // In this case we may want to move the destination address and trim the size
  // of the dead store to avoid generating stores to addresses which will
  // definitely be overwritten by the killing store.
375 if (!EnablePartialOverwriteTracking &&
376 (KillingOff <= DeadOff && int64_t(KillingOff + KillingSize) > DeadOff)) {
377 assert(int64_t(KillingOff + KillingSize) < int64_t(DeadOff + DeadSize) &&
378 "Expect to be handled as OW_Complete");
  // Otherwise, they don't completely overlap.
  return OW_Unknown;
}
385 /// Returns true if the memory which is accessed by the second instruction is not
386 /// modified between the first and the second instruction.
/// Precondition: Second instruction must be dominated by the first
/// instruction.
static bool
memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI,
                           BatchAAResults &AA, const DataLayout &DL,
                           DominatorTree *DT) {
393 // Do a backwards scan through the CFG from SecondI to FirstI. Look for
394 // instructions which can modify the memory location accessed by SecondI.
396 // While doing the walk keep track of the address to check. It might be
397 // different in different basic blocks due to PHI translation.
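  // E.g. (illustrative) if the checked address is
  //   %a = phi [ %p, %bb1 ], [ %q, %bb2 ]
  // then %p is the address to check in %bb1 and %q the one in %bb2.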
398 using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
399 SmallVector<BlockAddressPair, 16> WorkList;
400 // Keep track of the address we visited each block with. Bail out if we
401 // visit a block with different addresses.
402 DenseMap<BasicBlock *, Value *> Visited;
  BasicBlock::iterator FirstBBI(FirstI);
  ++FirstBBI;
406 BasicBlock::iterator SecondBBI(SecondI);
407 BasicBlock *FirstBB = FirstI->getParent();
408 BasicBlock *SecondBB = SecondI->getParent();
409 MemoryLocation MemLoc;
410 if (auto *MemSet = dyn_cast<MemSetInst>(SecondI))
411 MemLoc = MemoryLocation::getForDest(MemSet);
  else
    MemLoc = MemoryLocation::get(SecondI);
415 auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
417 // Start checking the SecondBB.
  WorkList.push_back(
      std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
420 bool isFirstBlock = true;
422 // Check all blocks going backward until we reach the FirstBB.
423 while (!WorkList.empty()) {
424 BlockAddressPair Current = WorkList.pop_back_val();
425 BasicBlock *B = Current.first;
426 PHITransAddr &Addr = Current.second;
427 Value *Ptr = Addr.getAddr();
429 // Ignore instructions before FirstI if this is the FirstBB.
430 BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
432 BasicBlock::iterator EI;
434 // Ignore instructions after SecondI if this is the first visit of SecondBB.
435 assert(B == SecondBB && "first block is not the store block");
437 isFirstBlock = false;
439 // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
440 // In this case we also have to look at instructions after SecondI.
443 for (; BI != EI; ++BI) {
444 Instruction *I = &*BI;
445 if (I->mayWriteToMemory() && I != SecondI)
        if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
          return false;
    }
    if (B != FirstBB) {
450 assert(B != &FirstBB->getParent()->getEntryBlock() &&
451 "Should not hit the entry block because SI must be dominated by LI");
452 for (BasicBlock *Pred : predecessors(B)) {
453 PHITransAddr PredAddr = Addr;
454 if (PredAddr.NeedsPHITranslationFromBlock(B)) {
          if (!PredAddr.IsPotentiallyPHITranslatable())
            return false;
          if (PredAddr.PHITranslateValue(B, Pred, DT, false))
            return false;
        }
460 Value *TranslatedPtr = PredAddr.getAddr();
461 auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr));
462 if (!Inserted.second) {
463 // We already visited this block before. If it was with a different
464 // address - bail out!
          if (TranslatedPtr != Inserted.first->second)
            return false;
          // ... otherwise just skip it.
          continue;
        }
        WorkList.push_back(std::make_pair(Pred, PredAddr));
      }
    }
  }
  return true;
}
477 static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
478 uint64_t &DeadSize, int64_t KillingStart,
479 uint64_t KillingSize, bool IsOverwriteEnd) {
480 auto *DeadIntrinsic = cast<AnyMemIntrinsic>(DeadI);
481 Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne();
  // We assume that memset/memcpy operates in chunks of the "largest" native
  // type size and aligned on the same value. That means the optimal start and
  // size of memset/memcpy should be a multiple of the preferred alignment of
  // that type, i.e. there is no sense in trying to reduce the store size any
  // further, since any "extra" stores come for free anyway.
  // On the other hand, the maximum alignment we can achieve is limited by the
  // alignment of the initial store.
491 // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
492 // "largest" native type.
493 // Note: What is the proper way to get that value?
494 // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
495 // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);
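  // E.g. (illustrative) when shortening at the end, a memset(p, 0, 32) whose
  // tail bytes [16, 32) are overwritten by a later store becomes a
  // memset(p, 0, 16), provided 16 keeps the preferred alignment.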
497 int64_t ToRemoveStart = 0;
498 uint64_t ToRemoveSize = 0;
499 // Compute start and size of the region to remove. Make sure 'PrefAlign' is
500 // maintained on the remaining store.
501 if (IsOverwriteEnd) {
502 // Calculate required adjustment for 'KillingStart' in order to keep
    // remaining store size aligned on 'PrefAlign'.
    uint64_t Off =
        offsetToAlignment(uint64_t(KillingStart - DeadStart), PrefAlign);
506 ToRemoveStart = KillingStart + Off;
    if (DeadSize <= uint64_t(ToRemoveStart - DeadStart))
      return false;
509 ToRemoveSize = DeadSize - uint64_t(ToRemoveStart - DeadStart);
  } else {
    ToRemoveStart = DeadStart;
512 assert(KillingSize >= uint64_t(DeadStart - KillingStart) &&
513 "Not overlapping accesses?");
514 ToRemoveSize = KillingSize - uint64_t(DeadStart - KillingStart);
    // Calculate required adjustment for 'ToRemoveSize' in order to keep the
    // start of the remaining store aligned on 'PrefAlign'.
517 uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
    if (Off != 0) {
      if (ToRemoveSize <= (PrefAlign.value() - Off))
        return false;
      ToRemoveSize -= PrefAlign.value() - Off;
    }
523 assert(isAligned(PrefAlign, ToRemoveSize) &&
524 "Should preserve selected alignment");
527 assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
528 assert(DeadSize > ToRemoveSize && "Can't remove more than original size");
530 uint64_t NewSize = DeadSize - ToRemoveSize;
531 if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(DeadI)) {
532 // When shortening an atomic memory intrinsic, the newly shortened
533 // length must remain an integer multiple of the element size.
534 const uint32_t ElementSize = AMI->getElementSizeInBytes();
    if (0 != NewSize % ElementSize)
      return false;
  }
539 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "
540 << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *DeadI
541 << "\n KILLER [" << ToRemoveStart << ", "
542 << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");
544 Value *DeadWriteLength = DeadIntrinsic->getLength();
545 Value *TrimmedLength = ConstantInt::get(DeadWriteLength->getType(), NewSize);
546 DeadIntrinsic->setLength(TrimmedLength);
547 DeadIntrinsic->setDestAlignment(PrefAlign);
549 if (!IsOverwriteEnd) {
550 Value *OrigDest = DeadIntrinsic->getRawDest();
    Type *Int8PtrTy =
        Type::getInt8PtrTy(DeadIntrinsic->getContext(),
553 OrigDest->getType()->getPointerAddressSpace());
554 Value *Dest = OrigDest;
555 if (OrigDest->getType() != Int8PtrTy)
556 Dest = CastInst::CreatePointerCast(OrigDest, Int8PtrTy, "", DeadI);
557 Value *Indices[1] = {
558 ConstantInt::get(DeadWriteLength->getType(), ToRemoveSize)};
559 Instruction *NewDestGEP = GetElementPtrInst::CreateInBounds(
560 Type::getInt8Ty(DeadIntrinsic->getContext()), Dest, Indices, "", DeadI);
561 NewDestGEP->setDebugLoc(DeadIntrinsic->getDebugLoc());
562 if (NewDestGEP->getType() != OrigDest->getType())
      NewDestGEP = CastInst::CreatePointerCast(NewDestGEP, OrigDest->getType(),
                                               "", DeadI);
    DeadIntrinsic->setDest(NewDestGEP);
  }
568 // Finally update start and size of dead access.
  if (!IsOverwriteEnd)
    DeadStart += ToRemoveSize;
  DeadSize = NewSize;

  return true;
}
576 static bool tryToShortenEnd(Instruction *DeadI, OverlapIntervalsTy &IntervalMap,
577 int64_t &DeadStart, uint64_t &DeadSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(DeadI))
    return false;
581 OverlapIntervalsTy::iterator OII = --IntervalMap.end();
582 int64_t KillingStart = OII->second;
583 uint64_t KillingSize = OII->first - KillingStart;
585 assert(OII->first - KillingStart >= 0 && "Size expected to be positive");
  if (KillingStart > DeadStart &&
      // Note: "KillingStart - DeadStart" is known to be positive due to
      // the preceding check.
      (uint64_t)(KillingStart - DeadStart) < DeadSize &&
      // Note: "DeadSize - (uint64_t)(KillingStart - DeadStart)" is known to
      // be non-negative due to the preceding checks.
      KillingSize >= DeadSize - (uint64_t)(KillingStart - DeadStart)) {
    if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
                     true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}
603 static bool tryToShortenBegin(Instruction *DeadI,
604 OverlapIntervalsTy &IntervalMap,
605 int64_t &DeadStart, uint64_t &DeadSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(DeadI))
    return false;
609 OverlapIntervalsTy::iterator OII = IntervalMap.begin();
610 int64_t KillingStart = OII->second;
611 uint64_t KillingSize = OII->first - KillingStart;
613 assert(OII->first - KillingStart >= 0 && "Size expected to be positive");
615 if (KillingStart <= DeadStart &&
616 // Note: "DeadStart - KillingStart" is known to be non negative due to
618 KillingSize > (uint64_t)(DeadStart - KillingStart)) {
619 // Note: "KillingSize - (uint64_t)(DeadStart - DeadStart)" is known to
620 // be positive due to preceding checks.
621 assert(KillingSize - (uint64_t)(DeadStart - KillingStart) < DeadSize &&
622 "Should have been handled as OW_Complete");
    if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
                     false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}
static Constant *
tryToMergePartialOverlappingStores(StoreInst *KillingI, StoreInst *DeadI,
                                   int64_t KillingOffset, int64_t DeadOffset,
                                   const DataLayout &DL, BatchAAResults &AA,
                                   DominatorTree *DT) {
638 if (DeadI && isa<ConstantInt>(DeadI->getValueOperand()) &&
639 DL.typeSizeEqualsStoreSize(DeadI->getValueOperand()->getType()) &&
640 KillingI && isa<ConstantInt>(KillingI->getValueOperand()) &&
641 DL.typeSizeEqualsStoreSize(KillingI->getValueOperand()->getType()) &&
642 memoryIsNotModifiedBetween(DeadI, KillingI, AA, DL, DT)) {
643 // If the store we find is:
644 // a) partially overwritten by the store to 'Loc'
645 // b) the killing store is fully contained in the dead one and
646 // c) they both have a constant value
647 // d) none of the two stores need padding
648 // Merge the two stores, replacing the dead store's value with a
649 // merge of both values.
650 // TODO: Deal with other constant types (vectors, etc), and probably
651 // some mem intrinsics (if needed)
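      // E.g. (little-endian, illustrative): merging a dead i32 store of
      // 0x11223344 at offset 0 with a killing i8 store of 0xFF at byte
      // offset 1 (LShiftAmount = 8) yields an i32 store of 0x1122FF44.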
653 APInt DeadValue = cast<ConstantInt>(DeadI->getValueOperand())->getValue();
    APInt KillingValue =
        cast<ConstantInt>(KillingI->getValueOperand())->getValue();
656 unsigned KillingBits = KillingValue.getBitWidth();
657 assert(DeadValue.getBitWidth() > KillingValue.getBitWidth());
658 KillingValue = KillingValue.zext(DeadValue.getBitWidth());
660 // Offset of the smaller store inside the larger store
661 unsigned BitOffsetDiff = (KillingOffset - DeadOffset) * 8;
662 unsigned LShiftAmount =
        DL.isBigEndian() ? DeadValue.getBitWidth() - BitOffsetDiff - KillingBits
                         : BitOffsetDiff;
665 APInt Mask = APInt::getBitsSet(DeadValue.getBitWidth(), LShiftAmount,
666 LShiftAmount + KillingBits);
667 // Clear the bits we'll be replacing, then OR with the smaller
668 // store, shifted appropriately.
669 APInt Merged = (DeadValue & ~Mask) | (KillingValue << LShiftAmount);
670 LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Dead: " << *DeadI
671 << "\n Killing: " << *KillingI
672 << "\n Merged Value: " << Merged << '\n');
    return ConstantInt::get(DeadI->getValueOperand()->getType(), Merged);
  }
  return nullptr;
}

// Returns true if \p I is an intrinsic that does not read or write memory.
static bool isNoopIntrinsic(Instruction *I) {
681 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
682 switch (II->getIntrinsicID()) {
683 case Intrinsic::lifetime_start:
684 case Intrinsic::lifetime_end:
685 case Intrinsic::invariant_end:
686 case Intrinsic::launder_invariant_group:
    case Intrinsic::assume:
      return true;
689 case Intrinsic::dbg_addr:
690 case Intrinsic::dbg_declare:
691 case Intrinsic::dbg_label:
692 case Intrinsic::dbg_value:
693 llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
701 // Check if we can ignore \p D for DSE.
static bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
703 Instruction *DI = D->getMemoryInst();
704 // Calls that only access inaccessible memory cannot read or write any memory
705 // locations we consider for elimination.
706 if (auto *CB = dyn_cast<CallBase>(DI))
    if (CB->onlyAccessesInaccessibleMemory())
      return true;
710 // We can eliminate stores to locations not visible to the caller across
711 // throwing instructions.
  if (DI->mayThrow() && !DefVisibleToCaller)
    return true;
715 // We can remove the dead stores, irrespective of the fence and its ordering
  // (release/acquire/seq_cst). Fences only constrain the ordering of already
  // visible stores; they do not make a store visible to other threads. So,
  // skipping over a fence does not change a store from being dead.
  if (isa<FenceInst>(DI))
    return true;
723 // Skip intrinsics that do not really read or modify memory.
  if (isNoopIntrinsic(DI))
    return true;

  return false;
}
struct DSEState {
  Function &F;
  AliasAnalysis &AA;
  EarliestEscapeInfo EI;
735 /// The single BatchAA instance that is used to cache AA queries. It will
736 /// not be invalidated over the whole run. This is safe, because:
737 /// 1. Only memory writes are removed, so the alias cache for memory
738 /// locations remains valid.
739 /// 2. No new instructions are added (only instructions removed), so cached
740 /// information for a deleted value cannot be accessed by a re-used new
  BatchAAResults BatchAA;

  MemorySSA &MSSA;
  DominatorTree &DT;
746 PostDominatorTree &PDT;
747 const TargetLibraryInfo &TLI;
  const DataLayout &DL;
  LoopInfo &LI;
751 // Whether the function contains any irreducible control flow, useful for
752 // being accurately able to detect loops.
753 bool ContainsIrreducibleLoops;
755 // All MemoryDefs that potentially could kill other MemDefs.
756 SmallVector<MemoryDef *, 64> MemDefs;
757 // Any that should be skipped as they are already deleted
758 SmallPtrSet<MemoryAccess *, 4> SkipStores;
759 // Keep track whether a given object is captured before return or not.
760 DenseMap<const Value *, bool> CapturedBeforeReturn;
761 // Keep track of all of the objects that are invisible to the caller after
762 // the function returns.
763 DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
764 // Keep track of blocks with throwing instructions not modeled in MemorySSA.
765 SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
766 // Post-order numbers for each basic block. Used to figure out if memory
767 // accesses are executed before another access.
768 DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
  /// Keep track of instructions (partly) overlapping with killing MemoryDefs
  /// per basic block.
772 MapVector<BasicBlock *, InstOverlapIntervalsTy> IOLs;
774 // Class contains self-reference, make sure it's not copied/moved.
775 DSEState(const DSEState &) = delete;
776 DSEState &operator=(const DSEState &) = delete;
778 DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
           PostDominatorTree &PDT, const TargetLibraryInfo &TLI, LoopInfo &LI)
781 : F(F), AA(AA), EI(DT, LI), BatchAA(AA, &EI), MSSA(MSSA), DT(DT),
782 PDT(PDT), TLI(TLI), DL(F.getParent()->getDataLayout()), LI(LI) {
783 // Collect blocks with throwing instructions not modeled in MemorySSA and
784 // alloc-like objects.
    unsigned PO = 0;
    for (BasicBlock *BB : post_order(&F)) {
787 PostOrderNumbers[BB] = PO++;
788 for (Instruction &I : *BB) {
789 MemoryAccess *MA = MSSA.getMemoryAccess(&I);
790 if (I.mayThrow() && !MA)
791 ThrowingBlocks.insert(I.getParent());
793 auto *MD = dyn_cast_or_null<MemoryDef>(MA);
794 if (MD && MemDefs.size() < MemorySSADefsPerBlockLimit &&
795 (getLocForWrite(&I) || isMemTerminatorInst(&I)))
          MemDefs.push_back(MD);
      }
    }
800 // Treat byval or inalloca arguments the same as Allocas, stores to them are
801 // dead at the end of the function.
802 for (Argument &AI : F.args())
803 if (AI.hasPassPointeeByValueCopyAttr())
804 InvisibleToCallerAfterRet.insert({&AI, true});
806 // Collect whether there is any irreducible control flow in the function.
    ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI);
  }
810 /// Return 'OW_Complete' if a store to the 'KillingLoc' location (by \p
811 /// KillingI instruction) completely overwrites a store to the 'DeadLoc'
812 /// location (by \p DeadI instruction).
813 /// Return OW_MaybePartial if \p KillingI does not completely overwrite
814 /// \p DeadI, but they both write to the same underlying object. In that
815 /// case, use isPartialOverwrite to check if \p KillingI partially overwrites
  /// \p DeadI. Returns 'OW_None' if \p KillingI is known to not overwrite the
817 /// \p DeadI. Returns 'OW_Unknown' if nothing can be determined.
818 OverwriteResult isOverwrite(const Instruction *KillingI,
819 const Instruction *DeadI,
820 const MemoryLocation &KillingLoc,
821 const MemoryLocation &DeadLoc,
822 int64_t &KillingOff, int64_t &DeadOff) {
823 // AliasAnalysis does not always account for loops. Limit overwrite checks
824 // to dependencies for which we can guarantee they are independent of any
825 // loops they are in.
    if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc))
      return OW_Unknown;
829 const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts();
830 const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts();
831 const Value *DeadUndObj = getUnderlyingObject(DeadPtr);
832 const Value *KillingUndObj = getUnderlyingObject(KillingPtr);
834 // Check whether the killing store overwrites the whole object, in which
835 // case the size/offset of the dead store does not matter.
836 if (DeadUndObj == KillingUndObj && KillingLoc.Size.isPrecise()) {
837 uint64_t KillingUndObjSize = getPointerSize(KillingUndObj, DL, TLI, &F);
838 if (KillingUndObjSize != MemoryLocation::UnknownSize &&
          KillingUndObjSize == KillingLoc.Size.getValue())
        return OW_Complete;
    }
843 // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
844 // get imprecise values here, though (except for unknown sizes).
845 if (!KillingLoc.Size.isPrecise() || !DeadLoc.Size.isPrecise()) {
      // In case no constant size is known, try to use IR values for the
      // number of bytes written and check if they match.
848 const auto *KillingMemI = dyn_cast<MemIntrinsic>(KillingI);
849 const auto *DeadMemI = dyn_cast<MemIntrinsic>(DeadI);
850 if (KillingMemI && DeadMemI) {
851 const Value *KillingV = KillingMemI->getLength();
852 const Value *DeadV = DeadMemI->getLength();
        if (KillingV == DeadV && BatchAA.isMustAlias(DeadLoc, KillingLoc))
          return OW_Complete;
      }
      // Masked stores have imprecise locations, but we can reason about them
      // to some extent.
      return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA);
    }
862 const uint64_t KillingSize = KillingLoc.Size.getValue();
863 const uint64_t DeadSize = DeadLoc.Size.getValue();
865 // Query the alias information
866 AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc);
868 // If the start pointers are the same, we just have to compare sizes to see if
869 // the killing store was larger than the dead store.
870 if (AAR == AliasResult::MustAlias) {
871 // Make sure that the KillingSize size is >= the DeadSize size.
      if (KillingSize >= DeadSize)
        return OW_Complete;
    }
876 // If we hit a partial alias we may have a full overwrite
877 if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
878 int32_t Off = AAR.getOffset();
      if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize)
        return OW_Complete;
    }
883 // If we can't resolve the same pointers to the same object, then we can't
884 // analyze them at all.
885 if (DeadUndObj != KillingUndObj) {
886 // Non aliasing stores to different objects don't overlap. Note that
887 // if the killing store is known to overwrite whole object (out of
888 // bounds access overwrites whole object as well) then it is assumed to
889 // completely overwrite any store to the same object even if they don't
890 // actually alias (see next check).
      if (AAR == AliasResult::NoAlias)
        return OW_None;
      return OW_Unknown;
    }
896 // Okay, we have stores to two completely different pointers. Try to
897 // decompose the pointer into a "base + constant_offset" form. If the base
898 // pointers are equal, then we can reason about the two stores.
    DeadOff = 0;
    KillingOff = 0;
    const Value *DeadBasePtr =
902 GetPointerBaseWithConstantOffset(DeadPtr, DeadOff, DL);
903 const Value *KillingBasePtr =
904 GetPointerBaseWithConstantOffset(KillingPtr, KillingOff, DL);
    // If the base pointers still differ, we have two completely different
    // stores.
    if (DeadBasePtr != KillingBasePtr)
      return OW_Unknown;
911 // The killing access completely overlaps the dead store if and only if
912 // both start and end of the dead one is "inside" the killing one:
913 // |<->|--dead--|<->|
914 // |-----killing------|
    // Accesses may overlap if and only if start of one of them is "inside"
    // another one:
917 // |<->|--dead--|<-------->|
918 // |-------killing--------|
920 // |-------dead-------|
921 // |<->|---killing---|<----->|
923 // We have to be careful here as *Off is signed while *.Size is unsigned.
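    // E.g. (illustrative) DeadOff = 4, DeadSize = 4, KillingOff = 0,
    // KillingSize = 8: (4 - 0) + 4 <= 8 holds, so the dead access is
    // completely overwritten (OW_Complete).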
925 // Check if the dead access starts "not before" the killing one.
926 if (DeadOff >= KillingOff) {
927 // If the dead access ends "not after" the killing access then the
928 // dead one is completely overwritten by the killing one.
      if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize)
        return OW_Complete;
931 // If start of the dead access is "before" end of the killing access
932 // then accesses overlap.
933 else if ((uint64_t)(DeadOff - KillingOff) < KillingSize)
        return OW_MaybePartial;
    }
    // If start of the killing access is "before" end of the dead access then
    // accesses overlap.
938 else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) {
      return OW_MaybePartial;
    }
    // Can reach here only if accesses are known not to overlap.
    return OW_None;
  }
946 bool isInvisibleToCallerAfterRet(const Value *V) {
    if (isa<AllocaInst>(V))
      return true;
    auto I = InvisibleToCallerAfterRet.insert({V, false});
    if (I.second) {
951 if (!isInvisibleToCallerOnUnwind(V)) {
952 I.first->second = false;
953 } else if (isNoAliasCall(V)) {
        I.first->second = !PointerMayBeCaptured(V, true, false);
      }
    }
    return I.first->second;
  }
960 bool isInvisibleToCallerOnUnwind(const Value *V) {
961 bool RequiresNoCaptureBeforeUnwind;
    if (!isNotVisibleOnUnwind(V, RequiresNoCaptureBeforeUnwind))
      return false;
    if (!RequiresNoCaptureBeforeUnwind)
      return true;
    auto I = CapturedBeforeReturn.insert({V, true});
    if (I.second)
969 // NOTE: This could be made more precise by PointerMayBeCapturedBefore
970 // with the killing MemoryDef. But we refrain from doing so for now to
971 // limit compile-time and this does not cause any changes to the number
972 // of stores removed on a large test set in practice.
973 I.first->second = PointerMayBeCaptured(V, false, true);
    return !I.first->second;
  }
977 Optional<MemoryLocation> getLocForWrite(Instruction *I) const {
    if (!I->mayWriteToMemory())
      return None;
981 if (auto *CB = dyn_cast<CallBase>(I))
982 return MemoryLocation::getForDest(CB, TLI);
    return MemoryLocation::getOrNone(I);
  }
987 /// Assuming this instruction has a dead analyzable write, can we delete
988 /// this instruction?
989 bool isRemovable(Instruction *I) {
990 assert(getLocForWrite(I) && "Must have analyzable write");
992 // Don't remove volatile/atomic stores.
993 if (StoreInst *SI = dyn_cast<StoreInst>(I))
994 return SI->isUnordered();
996 if (auto *CB = dyn_cast<CallBase>(I)) {
997 // Don't remove volatile memory intrinsics.
998 if (auto *MI = dyn_cast<MemIntrinsic>(CB))
999 return !MI->isVolatile();
      // Never remove dead lifetime intrinsics, e.g. because they are followed
      // by a free.
      if (CB->isLifetimeStartOrEnd())
        return false;

      return CB->use_empty() && CB->willReturn() && CB->doesNotThrow();
    }

    return false;
  }
1012 /// Returns true if \p UseInst completely overwrites \p DefLoc
1013 /// (stored by \p DefInst).
1014 bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
1015 Instruction *UseInst) {
1016 // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
    // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
    // MemoryDef.
    if (!UseInst->mayWriteToMemory())
      return false;
1022 if (auto *CB = dyn_cast<CallBase>(UseInst))
      if (CB->onlyAccessesInaccessibleMemory())
        return false;
1026 int64_t InstWriteOffset, DepWriteOffset;
1027 if (auto CC = getLocForWrite(UseInst))
1028 return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset,
                         DepWriteOffset) == OW_Complete;
    return false;
  }
1033 /// Returns true if \p Def is not read before returning from the function.
1034 bool isWriteAtEndOfFunction(MemoryDef *Def) {
1035 LLVM_DEBUG(dbgs() << " Check if def " << *Def << " ("
1036 << *Def->getMemoryInst()
1037 << ") is at the end the function \n");
1039 auto MaybeLoc = getLocForWrite(Def->getMemoryInst());
1041 LLVM_DEBUG(dbgs() << " ... could not get location for write.\n");
1045 SmallVector<MemoryAccess *, 4> WorkList;
1046 SmallPtrSet<MemoryAccess *, 8> Visited;
1047 auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
      if (!Visited.insert(Acc).second)
        return;
1050 for (Use &U : Acc->uses())
        WorkList.push_back(cast<MemoryAccess>(U.getUser()));
    };
    PushMemUses(Def);
1054 for (unsigned I = 0; I < WorkList.size(); I++) {
1055 if (WorkList.size() >= MemorySSAScanLimit) {
1056 LLVM_DEBUG(dbgs() << " ... hit exploration limit.\n");
1060 MemoryAccess *UseAccess = WorkList[I];
1061 // Simply adding the users of MemoryPhi to the worklist is not enough,
      // because we might miss read clobbers in different iterations of a loop,
      // for example.
      // TODO: Add support for phi translation to handle the loop case.
      if (isa<MemoryPhi>(UseAccess))
        return false;
1068 // TODO: Checking for aliasing is expensive. Consider reducing the amount
1069 // of times this is called and/or caching it.
1070 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1071 if (isReadClobber(*MaybeLoc, UseInst)) {
1072 LLVM_DEBUG(dbgs() << " ... hit read clobber " << *UseInst << ".\n");
1076 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
        PushMemUses(UseDef);
    }
    return true;
  }
1082 /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1083 /// pair with the MemoryLocation terminated by \p I and a boolean flag
1084 /// indicating whether \p I is a free-like call.
1085 Optional<std::pair<MemoryLocation, bool>>
  getLocForTerminator(Instruction *I) const {
    uint64_t Len;
    Value *Ptr;
    if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
                                                      m_Value(Ptr))))
      return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1093 if (auto *CB = dyn_cast<CallBase>(I)) {
1094 if (isFreeCall(I, &TLI))
        return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)),
                               true)};
    }

    return None;
  }
1102 /// Returns true if \p I is a memory terminator instruction like
1103 /// llvm.lifetime.end or free.
1104 bool isMemTerminatorInst(Instruction *I) const {
1105 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1106 return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) ||
           isFreeCall(I, &TLI);
  }
1110 /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
1111 /// instruction \p AccessI.
1112 bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
1113 Instruction *MaybeTerm) {
1114 Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
        getLocForTerminator(MaybeTerm);
    if (!MaybeTermLoc)
      return false;
1120 // If the terminator is a free-like call, all accesses to the underlying
1121 // object can be considered terminated.
1122 if (getUnderlyingObject(Loc.Ptr) !=
        getUnderlyingObject(MaybeTermLoc->first.Ptr))
      return false;
1126 auto TermLoc = MaybeTermLoc->first;
1127 if (MaybeTermLoc->second) {
1128 const Value *LocUO = getUnderlyingObject(Loc.Ptr);
      return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
    }
1131 int64_t InstWriteOffset = 0;
1132 int64_t DepWriteOffset = 0;
1133 return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset,
                       DepWriteOffset) == OW_Complete;
  }
1137 // Returns true if \p Use may read from \p DefLoc.
1138 bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
    if (isNoopIntrinsic(UseInst))
      return false;
1142 // Monotonic or weaker atomic stores can be re-ordered and do not need to be
1143 // treated as read clobber.
1144 if (auto SI = dyn_cast<StoreInst>(UseInst))
1145 return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
    if (!UseInst->mayReadFromMemory())
      return false;
1150 if (auto *CB = dyn_cast<CallBase>(UseInst))
      if (CB->onlyAccessesInaccessibleMemory())
        return false;
    return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
  }
1157 /// Returns true if a dependency between \p Current and \p KillingDef is
1158 /// guaranteed to be loop invariant for the loops that they are in. Either
1159 /// because they are known to be in the same block, in the same loop level or
1160 /// by guaranteeing that \p CurrentLoc only references a single MemoryLocation
1161 /// during execution of the containing function.
1162 bool isGuaranteedLoopIndependent(const Instruction *Current,
1163 const Instruction *KillingDef,
1164 const MemoryLocation &CurrentLoc) {
1165 // If the dependency is within the same block or loop level (being careful
1166 // of irreducible loops), we know that AA will return a valid result for the
1167 // memory dependency. (Both at the function level, outside of any loop,
1168 // would also be valid but we currently disable that to limit compile time).
    if (Current->getParent() == KillingDef->getParent())
      return true;
1171 const Loop *CurrentLI = LI.getLoopFor(Current->getParent());
1172 if (!ContainsIrreducibleLoops && CurrentLI &&
        CurrentLI == LI.getLoopFor(KillingDef->getParent()))
      return true;
1175 // Otherwise check the memory location is invariant to any loops.
    return isGuaranteedLoopInvariant(CurrentLoc.Ptr);
  }
1179 /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1180 /// loop. In particular, this guarantees that it only references a single
1181 /// MemoryLocation during execution of the containing function.
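  /// E.g. (illustrative) a GEP with all-constant indices on an entry-block
  /// alloca is invariant, while a GEP on a loop-carried PHI is not.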
1182 bool isGuaranteedLoopInvariant(const Value *Ptr) {
1183 Ptr = Ptr->stripPointerCasts();
1184 if (auto *GEP = dyn_cast<GEPOperator>(Ptr))
1185 if (GEP->hasAllConstantIndices())
1186 Ptr = GEP->getPointerOperand()->stripPointerCasts();
1188 if (auto *I = dyn_cast<Instruction>(Ptr))
      return I->getParent()->isEntryBlock();
    return true;
  }
1193 // Find a MemoryDef writing to \p KillingLoc and dominating \p StartAccess,
1194 // with no read access between them or on any other path to a function exit
1195 // block if \p KillingLoc is not accessible after the function returns. If
1196 // there is no such MemoryDef, return None. The returned value may not
1197 // (completely) overwrite \p KillingLoc. Currently we bail out when we
1198 // encounter an aliasing MemoryUse (read).
1199 Optional<MemoryAccess *>
1200 getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1201 const MemoryLocation &KillingLoc, const Value *KillingUndObj,
1202 unsigned &ScanLimit, unsigned &WalkerStepLimit,
1203 bool IsMemTerm, unsigned &PartialLimit) {
1204 if (ScanLimit == 0 || WalkerStepLimit == 0) {
1205 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1209 MemoryAccess *Current = StartAccess;
1210 Instruction *KillingI = KillingDef->getMemoryInst();
1211 LLVM_DEBUG(dbgs() << " trying to get dominating access\n");
1213 // Only optimize defining access of KillingDef when directly starting at its
1214 // defining access. The defining access also must only access KillingLoc. At
1215 // the moment we only support instructions with a single write location, so
1216 // it should be sufficient to disable optimizations for instructions that
1217 // also read from memory.
1218 bool CanOptimize = OptimizeMemorySSA &&
1219 KillingDef->getDefiningAccess() == StartAccess &&
1220 !KillingI->mayReadFromMemory();
1222 // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1223 Optional<MemoryLocation> CurrentLoc;
1224 for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) {
1226 dbgs() << " visiting " << *Current;
1227 if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
1228 dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
1234 if (MSSA.isLiveOnEntryDef(Current)) {
1235 LLVM_DEBUG(dbgs() << " ... found LiveOnEntryDef\n");
1239 // Cost of a step. Accesses in the same block are more likely to be valid
1240 // candidates for elimination, hence consider them cheaper.
1241 unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
1242 ? MemorySSASameBBStepCost
1243 : MemorySSAOtherBBStepCost;
1244 if (WalkerStepLimit <= StepCost) {
1245 LLVM_DEBUG(dbgs() << " ... hit walker step limit\n");
1248 WalkerStepLimit -= StepCost;
1250 // Return for MemoryPhis. They cannot be eliminated directly and the
1251 // caller is responsible for traversing them.
1252 if (isa<MemoryPhi>(Current)) {
1253 LLVM_DEBUG(dbgs() << " ... found MemoryPhi\n");
1257 // Below, check if CurrentDef is a valid candidate to be eliminated by
1258 // KillingDef. If it is not, check the next candidate.
1259 MemoryDef *CurrentDef = cast<MemoryDef>(Current);
1260 Instruction *CurrentI = CurrentDef->getMemoryInst();
1262 if (canSkipDef(CurrentDef, !isInvisibleToCallerOnUnwind(KillingUndObj))) {
        CanOptimize = false;
        continue;
      }
1267 // Before we try to remove anything, check for any extra throwing
1268 // instructions that block us from DSEing
1269 if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) {
1270 LLVM_DEBUG(dbgs() << " ... skip, may throw!\n");
      // Check for anything that looks like it will be a barrier to further
      // removal.
      if (isDSEBarrier(KillingUndObj, CurrentI)) {
        LLVM_DEBUG(dbgs() << "  ... skip, barrier\n");
        return None;
      }
1281 // If Current is known to be on path that reads DefLoc or is a read
1282 // clobber, bail out, as the path is not profitable. We skip this check
      // for intrinsic calls, because the code knows how to handle memcpy
      // intrinsics.
      if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(KillingLoc, CurrentI))
        return None;
1288 // Quick check if there are direct uses that are read-clobbers.
1289 if (any_of(Current->uses(), [this, &KillingLoc, StartAccess](Use &U) {
1290 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
1291 return !MSSA.dominates(StartAccess, UseOrDef) &&
                     isReadClobber(KillingLoc, UseOrDef->getMemoryInst());
            return false;
          })) {
        LLVM_DEBUG(dbgs() << "   ...  found a read clobber\n");
        return None;
      }
1299 // If Current does not have an analyzable write location or is not
1300 // removable, skip it.
1301 CurrentLoc = getLocForWrite(CurrentI);
1302 if (!CurrentLoc || !isRemovable(CurrentI)) {
        CanOptimize = false;
        continue;
      }
1307 // AliasAnalysis does not account for loops. Limit elimination to
1308 // candidates for which we can guarantee they always store to the same
1309 // memory location and not located in different loops.
1310 if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) {
1311 LLVM_DEBUG(dbgs() << " ... not guaranteed loop independent\n");
1312 WalkerStepLimit -= 1;
        CanOptimize = false;
        continue;
      }
      if (IsMemTerm) {
        // If the killing def is a memory terminator (e.g. lifetime.end), check
        // the next candidate if the current Current does not write the same
        // underlying object as the terminator.
        if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
          CanOptimize = false;
          continue;
        }
      } else {
1326 int64_t KillingOffset = 0;
1327 int64_t DeadOffset = 0;
1328 auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc,
                              KillingOffset, DeadOffset);
        if (CanOptimize) {
1331 // CurrentDef is the earliest write clobber of KillingDef. Use it as
1332 // optimized access. Do not optimize if CurrentDef is already the
1333 // defining access of KillingDef.
1334 if (CurrentDef != KillingDef->getDefiningAccess() &&
1335 (OR == OW_Complete || OR == OW_MaybePartial))
1336 KillingDef->setOptimized(CurrentDef);
          // Once a may-aliasing def is encountered do not set an optimized
          // access.
          if (OR != OW_None)
            CanOptimize = false;
        }
1344 // If Current does not write to the same object as KillingDef, check
1345 // the next candidate.
        if (OR == OW_Unknown || OR == OW_None)
          continue;
1348 else if (OR == OW_MaybePartial) {
1349 // If KillingDef only partially overwrites Current, check the next
1350 // candidate if the partial step limit is exceeded. This aggressively
1351 // limits the number of candidates for partial store elimination,
1352 // which are less likely to be removable in the end.
1353 if (PartialLimit <= 1) {
1354 WalkerStepLimit -= 1;
1355 LLVM_DEBUG(dbgs() << " ... reached partial limit ... continue with next access\n");
1364 // Accesses to objects accessible after the function returns can only be
1365 // eliminated if the access is dead along all paths to the exit. Collect
1366 // the blocks with killing (=completely overwriting MemoryDefs) and check if
1367 // they cover all paths from MaybeDeadAccess to any function exit.
1368 SmallPtrSet<Instruction *, 16> KillingDefs;
1369 KillingDefs.insert(KillingDef->getMemoryInst());
1370 MemoryAccess *MaybeDeadAccess = Current;
1371 MemoryLocation MaybeDeadLoc = *CurrentLoc;
1372 Instruction *MaybeDeadI = cast<MemoryDef>(MaybeDeadAccess)->getMemoryInst();
1373 LLVM_DEBUG(dbgs() << " Checking for reads of " << *MaybeDeadAccess << " ("
1374 << *MaybeDeadI << ")\n");
1376 SmallSetVector<MemoryAccess *, 32> WorkList;
1377 auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
1378 for (Use &U : Acc->uses())
        WorkList.insert(cast<MemoryAccess>(U.getUser()));
    };
1381 PushMemUses(MaybeDeadAccess);
1383 // Check if DeadDef may be read.
1384 for (unsigned I = 0; I < WorkList.size(); I++) {
1385 MemoryAccess *UseAccess = WorkList[I];
1387 LLVM_DEBUG(dbgs() << " " << *UseAccess);
1388 // Bail out if the number of accesses to check exceeds the scan limit.
1389 if (ScanLimit < (WorkList.size() - I)) {
1390 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1394 NumDomMemDefChecks++;
1396 if (isa<MemoryPhi>(UseAccess)) {
1397 if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
1398 return DT.properlyDominates(KI->getParent(),
                                            UseAccess->getBlock());
              })) {
          LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
          continue;
        }
        LLVM_DEBUG(dbgs() << "\n    ... adding PHI uses\n");
        PushMemUses(UseAccess);
        continue;
      }
1409 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1410 LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
1412 if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
            return DT.dominates(KI, UseInst);
          })) {
        LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
        continue;
      }
      // A memory terminator kills all preceding MemoryDefs and all succeeding
      // MemoryAccesses. We do not have to check its users.
1421 if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1424 << " ... skipping, memterminator invalidates following accesses\n");
1428 if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
1429 LLVM_DEBUG(dbgs() << " ... adding uses of intrinsic\n");
        PushMemUses(UseAccess);
        continue;
      }
1434 if (UseInst->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) {
1435 LLVM_DEBUG(dbgs() << " ... found throwing instruction\n");
1439 // Uses which may read the original MemoryDef mean we cannot eliminate the
1440 // original MD. Stop walk.
1441 if (isReadClobber(MaybeDeadLoc, UseInst)) {
1442 LLVM_DEBUG(dbgs() << " ... found read clobber\n");
1446 // If this worklist walks back to the original memory access (and the
      // pointer is not guaranteed loop invariant) then we cannot assume that a
1448 // store kills itself.
1449 if (MaybeDeadAccess == UseAccess &&
1450 !isGuaranteedLoopInvariant(MaybeDeadLoc.Ptr)) {
1451 LLVM_DEBUG(dbgs() << " ... found not loop invariant self access\n");
1454 // Otherwise, for the KillingDef and MaybeDeadAccess we only have to check
1455 // if it reads the memory location.
1456 // TODO: It would probably be better to check for self-reads before
1457 // calling the function.
1458 if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess) {
1459 LLVM_DEBUG(dbgs() << " ... skipping killing def/dom access\n");
1463 // Check all uses for MemoryDefs, except for defs completely overwriting
1464 // the original location. Otherwise we have to check uses of *all*
1465 // MemoryDefs we discover, including non-aliasing ones. Otherwise we might
1466 // miss cases like the following
1467 // 1 = Def(LoE) ; <----- DeadDef stores [0,1]
1468 // 2 = Def(1) ; (2, 1) = NoAlias, stores [2,3]
1469 // Use(2) ; MayAlias 2 *and* 1, loads [0, 3].
1470 // (The Use points to the *first* Def it may alias)
      //  3 = Def(1)   ; <---- Current  (3, 2) = NoAlias, (3,1) = MayAlias,
      //                  stores [0,1]
1473 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
1474 if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1475 BasicBlock *MaybeKillingBlock = UseInst->getParent();
1476 if (PostOrderNumbers.find(MaybeKillingBlock)->second <
1477 PostOrderNumbers.find(MaybeDeadAccess->getBlock())->second) {
1478 if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
1480 << " ... found killing def " << *UseInst << "\n");
1481 KillingDefs.insert(UseInst);
1485 << " ... found preceeding def " << *UseInst << "\n");
1489 PushMemUses(UseDef);
1493 // For accesses to locations visible after the function returns, make sure
1494 // that the location is dead (=overwritten) along all paths from
1495 // MaybeDeadAccess to the exit.
1496 if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
1497 SmallPtrSet<BasicBlock *, 16> KillingBlocks;
1498 for (Instruction *KD : KillingDefs)
1499 KillingBlocks.insert(KD->getParent());
1500 assert(!KillingBlocks.empty() &&
1501 "Expected at least a single killing block");
1503 // Find the common post-dominator of all killing blocks.
1504 BasicBlock *CommonPred = *KillingBlocks.begin();
1505 for (BasicBlock *BB : llvm::drop_begin(KillingBlocks)) {
        if (!CommonPred)
          break;
        CommonPred = PDT.findNearestCommonDominator(CommonPred, BB);
      }
1511 // If CommonPred is in the set of killing blocks, just check if it
1512 // post-dominates MaybeDeadAccess.
1513 if (KillingBlocks.count(CommonPred)) {
1514 if (PDT.dominates(CommonPred, MaybeDeadAccess->getBlock()))
          return {MaybeDeadAccess};
        return None;
      }
1519 // If the common post-dominator does not post-dominate MaybeDeadAccess,
      // there is a path from MaybeDeadAccess to an exit not going through a
      // killing block.
1522 if (PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) {
1523 SetVector<BasicBlock *> WorkList;
1525 // If CommonPred is null, there are multiple exits from the function.
1526 // They all have to be added to the worklist.
        if (CommonPred)
          WorkList.insert(CommonPred);
        else
          for (BasicBlock *R : PDT.roots())
            WorkList.insert(R);
1534 // Check if all paths starting from an exit node go through one of the
1535 // killing blocks before reaching MaybeDeadAccess.
1536 for (unsigned I = 0; I < WorkList.size(); I++) {
1538 BasicBlock *Current = WorkList[I];
          if (KillingBlocks.count(Current))
            continue;
          if (Current == MaybeDeadAccess->getBlock())
            return None;
1544 // MaybeDeadAccess is reachable from the entry, so we don't have to
1545 // explore unreachable blocks further.
          if (!DT.isReachableFromEntry(Current))
            continue;
1549 for (BasicBlock *Pred : predecessors(Current))
1550 WorkList.insert(Pred);
          if (WorkList.size() >= MemorySSAPathCheckLimit)
            return None;
        }
        NumCFGSuccess++;
        return {MaybeDeadAccess};
      }
      return None;
    }
1561 // No aliasing MemoryUses of MaybeDeadAccess found, MaybeDeadAccess is
1562 // potentially dead.
    return {MaybeDeadAccess};
  }
  // Delete dead memory defs and recursively erase operands that become
  // trivially dead.
  void deleteDeadInstruction(Instruction *SI) {
    MemorySSAUpdater Updater(&MSSA);
    SmallVector<Instruction *, 32> NowDeadInsts;
    NowDeadInsts.push_back(SI);
    --NumFastOther;

    while (!NowDeadInsts.empty()) {
      Instruction *DeadInst = NowDeadInsts.pop_back_val();
      ++NumFastOther;

      // Try to preserve debug information attached to the dead instruction.
      salvageDebugInfo(*DeadInst);
      salvageKnowledge(DeadInst);

      // Remove the Instruction from MSSA.
      if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
        if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
          SkipStores.insert(MD);
        }
        Updater.removeMemoryAccess(MA);
      }

      auto I = IOLs.find(DeadInst->getParent());
      if (I != IOLs.end())
        I->second.erase(DeadInst);
      // Remove its operands. Any operand that becomes trivially dead is
      // queued for deletion as well.
      for (Use &O : DeadInst->operands())
        if (Instruction *OpI = dyn_cast<Instruction>(O)) {
          O = nullptr;
          if (isInstructionTriviallyDead(OpI, &TLI))
            NowDeadInsts.push_back(OpI);
        }

      EI.removeInstruction(DeadInst);
      DeadInst->eraseFromParent();
    }
  }
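  // Note that deletion cascades: for example, erasing `store i8 0, i8* %gep`
  // may leave `%gep = getelementptr ...` without users, in which case the
  // worklist above picks it up via isInstructionTriviallyDead and erases it
  // (and, transitively, its now-dead operands) too.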
  // Check for any extra throws between \p KillingI and \p DeadI that block
  // DSE. This only checks extra maythrows (those that aren't MemoryDefs).
  // MemoryDefs that may throw are handled during the walk from one def to the
  // next.
  bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
                       const Value *KillingUndObj) {
    // First see if we can ignore it by using the fact that KillingI writes to
    // an alloca or alloca-like object that is not visible to the caller
    // during execution of the function.
    if (KillingUndObj && isInvisibleToCallerOnUnwind(KillingUndObj))
      return false;

    if (KillingI->getParent() == DeadI->getParent())
      return ThrowingBlocks.count(KillingI->getParent());
    return !ThrowingBlocks.empty();
  }
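  // For illustration, a minimal hypothetical example:
  //
  //   store i32 0, i32* %p     ; dead?
  //   call void @f()           ; may throw
  //   store i32 1, i32* %p
  //
  // If @f unwinds, the caller (or an exception handler) may read %p and
  // observe the first store, so it only counts as dead when the stored-to
  // object is a non-escaping local that nobody can read during unwinding.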
  // Check if \p DeadI acts as a DSE barrier for \p KillingI. The following
  // instructions act as barriers:
  //  * A memory instruction that may throw, while \p KillingI accesses a
  //    non-stack object.
  //  * Atomic stores stronger than monotonic.
  bool isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI) {
    // If DeadI may throw it acts as a barrier, unless we are writing to an
    // alloca or alloca-like object that does not escape.
    if (DeadI->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj))
      return true;

    // If DeadI is an atomic load/store stronger than monotonic, do not try to
    // eliminate/reorder it.
    if (DeadI->isAtomic()) {
      if (auto *LI = dyn_cast<LoadInst>(DeadI))
        return isStrongerThanMonotonic(LI->getOrdering());
      if (auto *SI = dyn_cast<StoreInst>(DeadI))
        return isStrongerThanMonotonic(SI->getOrdering());
      if (auto *ARMW = dyn_cast<AtomicRMWInst>(DeadI))
        return isStrongerThanMonotonic(ARMW->getOrdering());
      if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(DeadI))
        return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
               isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
      llvm_unreachable("other instructions should be skipped in MemorySSA");
    }
    return false;
  }
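  // For illustration: a release store cannot be treated as dead even if a
  // later store fully overwrites it, because removing it would drop the
  // ordering it provides to other threads:
  //
  //   store atomic i32 0, i32* %p release, align 4  ; barrier, keep
  //   store i32 1, i32* %p
  //
  // Monotonic (or weaker) accesses do not synchronize with other threads, so
  // they remain elimination candidates.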
  /// Eliminate writes to objects that are not visible in the caller and are
  /// not accessed before returning from the function.
  bool eliminateDeadWritesAtEndOfFunction() {
    bool MadeChange = false;
    LLVM_DEBUG(
        dbgs()
        << "Trying to eliminate MemoryDefs at the end of the function\n");
    for (MemoryDef *Def : llvm::reverse(MemDefs)) {
      if (SkipStores.contains(Def))
        continue;

      Instruction *DefI = Def->getMemoryInst();
      auto DefLoc = getLocForWrite(DefI);
      if (!DefLoc || !isRemovable(DefI))
        continue;

      // NOTE: Currently eliminating writes at the end of a function is
      // limited to MemoryDefs with a single underlying object, to save
      // compile-time. In practice it appears the case with multiple
      // underlying objects is very uncommon. If it turns out to be important,
      // we can use getUnderlyingObjects here instead.
      const Value *UO = getUnderlyingObject(DefLoc->Ptr);
      if (!isInvisibleToCallerAfterRet(UO))
        continue;

      if (isWriteAtEndOfFunction(Def)) {
        LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end "
                             "of the function\n");
        deleteDeadInstruction(DefI);
        ++NumFastStores;
        MadeChange = true;
      }
    }
    return MadeChange;
  }
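  // For illustration, a write eliminated by the function above:
  //
  //   define void @f() {
  //     %m = alloca i32
  //     store i32 42, i32* %m   ; dead: %m does not escape and is never
  //     ret void                ; read again before the function returns
  //   }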
  /// If we have a zero initializing memset following a call to malloc,
  /// try folding it into a call to calloc.
  bool tryFoldIntoCalloc(MemoryDef *Def, const Value *DefUO) {
    Instruction *DefI = Def->getMemoryInst();
    MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
    // TODO: Could handle zero store to small allocation as well.
    if (!MemSet)
      return false;
    Constant *StoredConstant = dyn_cast<Constant>(MemSet->getValue());
    if (!StoredConstant || !StoredConstant->isNullValue())
      return false;

    if (!isRemovable(DefI))
      // The memset might be volatile, in which case it must not be removed.
      return false;

    if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
        F.hasFnAttribute(Attribute::SanitizeAddress) ||
        F.hasFnAttribute(Attribute::SanitizeHWAddress) ||
        F.getName() == "calloc")
      return false;
    auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUO));
    if (!Malloc)
      return false;
    auto *InnerCallee = Malloc->getCalledFunction();
    if (!InnerCallee)
      return false;
    LibFunc Func;
    if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) ||
        Func != LibFunc_malloc)
      return false;

    auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) {
      // Check for a `br (icmp eq ptr, null), truebb, falsebb` pattern at the
      // end of the malloc block, with the memset in falsebb.
      auto *MallocBB = Malloc->getParent(),
           *MemsetBB = Memset->getParent();
      if (MallocBB == MemsetBB)
        return true;
      auto *Ptr = Memset->getArgOperand(0);
      auto *TI = MallocBB->getTerminator();
      ICmpInst::Predicate Pred;
      BasicBlock *TrueBB, *FalseBB;
      if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Ptr), m_Zero()), TrueBB,
                          FalseBB)))
        return false;
      if (Pred != ICmpInst::ICMP_EQ || MemsetBB != FalseBB)
        return false;
      return true;
    };

    if (Malloc->getOperand(0) != MemSet->getLength())
      return false;
    if (!shouldCreateCalloc(Malloc, MemSet) ||
        !DT.dominates(Malloc, MemSet) ||
        !memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT))
      return false;
    IRBuilder<> IRB(Malloc);
    const auto &DL = Malloc->getModule()->getDataLayout();
    auto *Calloc = emitCalloc(ConstantInt::get(IRB.getIntPtrTy(DL), 1),
                              Malloc->getArgOperand(0), IRB, TLI);
    if (!Calloc)
      return false;
    MemorySSAUpdater Updater(&MSSA);
    auto *LastDef =
        cast<MemoryDef>(Updater.getMemorySSA()->getMemoryAccess(Malloc));
    auto *NewAccess = Updater.createMemoryAccessAfter(
        cast<Instruction>(Calloc), LastDef, LastDef);
    auto *NewAccessMD = cast<MemoryDef>(NewAccess);
    Updater.insertDef(NewAccessMD, /*RenameUses=*/true);
    Updater.removeMemoryAccess(Malloc);
    Malloc->replaceAllUsesWith(Calloc);
    Malloc->eraseFromParent();
    return true;
  }
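  // For illustration, the rewrite performed above (types abbreviated):
  //
  //   %p = call i8* @malloc(i64 %n)
  //   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 %n, i1 false)
  // becomes
  //   %p = call i8* @calloc(i64 1, i64 %n)
  //
  // which is only done when the memset covers the entire allocation and
  // nothing may write the memory between the two calls, as checked above.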
  /// \returns true if \p Def is a no-op store, either because it
  /// directly stores back a loaded value or stores zero to a calloced object.
  bool storeIsNoop(MemoryDef *Def, const Value *DefUO) {
    Instruction *DefI = Def->getMemoryInst();
    StoreInst *Store = dyn_cast<StoreInst>(DefI);
    MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
    Constant *StoredConstant = nullptr;
    if (Store)
      StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
    else if (MemSet)
      StoredConstant = dyn_cast<Constant>(MemSet->getValue());
    else
      return false;

    if (!isRemovable(DefI))
      return false;

    if (StoredConstant && isAllocationFn(DefUO, &TLI)) {
      auto *CB = cast<CallBase>(DefUO);
      auto *InitC =
          getInitialValueOfAllocation(CB, &TLI, StoredConstant->getType());
      // If the clobbering access is LiveOnEntry, no instructions between the
      // allocation and the store can modify the memory location.
      if (InitC && InitC == StoredConstant)
        return MSSA.isLiveOnEntryDef(
            MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def));
    }

    if (!Store)
      return false;

    if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
      if (LoadI->getPointerOperand() == Store->getOperand(1)) {
        // Get the defining access for the load.
        auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
        // Fast path: the defining accesses are the same.
        if (LoadAccess == Def->getDefiningAccess())
          return true;

        // Look through phi accesses. Recursively scan all phi accesses by
        // adding them to a worklist. Bail when we run into a memory def that
        // does not match LoadAccess.
        SetVector<MemoryAccess *> ToCheck;
        MemoryAccess *Current =
            MSSA.getWalker()->getClobberingMemoryAccess(Def);
        // We don't want to bail when we run into the store memory def. But,
        // the phi access may point to it. So, pretend like we've already
        // checked it.
        ToCheck.insert(Def);
        ToCheck.insert(Current);
        // Start at current (1) to simulate already having checked Def.
        for (unsigned I = 1; I < ToCheck.size(); ++I) {
          Current = ToCheck[I];
          if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
            // Check all the operands.
            for (auto &Use : PhiAccess->incoming_values())
              ToCheck.insert(cast<MemoryAccess>(&Use));
            continue;
          }

          // If we found a memory def, bail. This happens when we have an
          // unrelated write in between an otherwise noop store.
          assert(isa<MemoryDef>(Current) &&
                 "Only MemoryDefs should reach here.");
          // TODO: Skip no alias MemoryDefs that have no aliasing reads.
          // We are searching for the definition of the store's destination.
          // So, if that is the same definition as the load, then this is a
          // noop. Otherwise, fail.
          if (LoadAccess != Current)
            return false;
        }
        return true;
      }
    }

    return false;
  }
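  // For illustration, a no-op store recognized above:
  //
  //   %v = load i32, i32* %p
  //   ...                      ; no clobber of %p on any path in between
  //   store i32 %v, i32* %p    ; writes back the value already in memory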
  bool removePartiallyOverlappedStores(InstOverlapIntervalsTy &IOL) {
    bool Changed = false;
    for (auto OI : IOL) {
      Instruction *DeadI = OI.first;
      MemoryLocation Loc = *getLocForWrite(DeadI);
      assert(isRemovable(DeadI) && "Expect only removable instruction");

      const Value *Ptr = Loc.Ptr->stripPointerCasts();
      int64_t DeadStart = 0;
      uint64_t DeadSize = Loc.Size.getValue();
      GetPointerBaseWithConstantOffset(Ptr, DeadStart, DL);
      OverlapIntervalsTy &IntervalMap = OI.second;
      Changed |= tryToShortenEnd(DeadI, IntervalMap, DeadStart, DeadSize);
      if (IntervalMap.empty())
        continue;
      Changed |= tryToShortenBegin(DeadI, IntervalMap, DeadStart, DeadSize);
    }
    return Changed;
  }
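  // For illustration, a partial overlap handled above (hypothetical sizes):
  //
  //   memset(p, 0, 32)         ; bytes [8, 32) are later overwritten
  //   memcpy(p + 8, q, 24)
  //
  // The memset cannot be removed outright, but tryToShortenEnd shrinks it
  // to memset(p, 0, 8), trimming the overwritten bytes.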
  /// Eliminates writes to locations where the value that is being written
  /// is already stored at the same location.
  bool eliminateRedundantStoresOfExistingValues() {
    bool MadeChange = false;
    LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs that write the "
                         "already existing value\n");
    for (auto *Def : MemDefs) {
      if (SkipStores.contains(Def) || MSSA.isLiveOnEntryDef(Def))
        continue;

      Instruction *DefInst = Def->getMemoryInst();
      auto MaybeDefLoc = getLocForWrite(DefInst);
      if (!MaybeDefLoc || !isRemovable(DefInst))
        continue;

      MemoryDef *UpperDef;
      // To conserve compile-time, we avoid walking to the next clobbering def.
      // Instead, we just try to get the optimized access, if it exists. DSE
      // will try to optimize defs during the earlier traversal.
      if (Def->isOptimized())
        UpperDef = dyn_cast<MemoryDef>(Def->getOptimized());
      else
        UpperDef = dyn_cast<MemoryDef>(Def->getDefiningAccess());
      if (!UpperDef || MSSA.isLiveOnEntryDef(UpperDef))
        continue;

      Instruction *UpperInst = UpperDef->getMemoryInst();
      auto IsRedundantStore = [&]() {
        if (DefInst->isIdenticalTo(UpperInst))
          return true;
        if (auto *MemSetI = dyn_cast<MemSetInst>(UpperInst)) {
          if (auto *SI = dyn_cast<StoreInst>(DefInst)) {
            // MemSetInst must have a write location.
            MemoryLocation UpperLoc = *getLocForWrite(UpperInst);
            int64_t InstWriteOffset = 0;
            int64_t DepWriteOffset = 0;
            auto OR = isOverwrite(UpperInst, DefInst, UpperLoc, *MaybeDefLoc,
                                  InstWriteOffset, DepWriteOffset);
            Value *StoredByte = isBytewiseValue(SI->getValueOperand(), DL);
            return StoredByte && StoredByte == MemSetI->getOperand(1) &&
                   OR == OW_Complete;
          }
        }
        return false;
      };

      if (!IsRedundantStore() || isReadClobber(*MaybeDefLoc, DefInst))
        continue;
      LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *DefInst
                        << '\n');
      deleteDeadInstruction(DefInst);
      NumRedundantStores++;
      MadeChange = true;
    }
    return MadeChange;
  }
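  // For illustration, a redundant store eliminated above:
  //
  //   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 16, i1 false)
  //   ...                      ; no intervening clobber of %p
  //   store i8 0, i8* %p       ; rewrites a byte the memset already set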
};

static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
                                DominatorTree &DT, PostDominatorTree &PDT,
                                const TargetLibraryInfo &TLI,
                                const LoopInfo &LI) {
  bool MadeChange = false;

  DSEState State(F, AA, MSSA, DT, PDT, TLI, LI);
  // For each store:
  for (unsigned I = 0; I < State.MemDefs.size(); I++) {
    MemoryDef *KillingDef = State.MemDefs[I];
    if (State.SkipStores.count(KillingDef))
      continue;
    Instruction *KillingI = KillingDef->getMemoryInst();

    Optional<MemoryLocation> MaybeKillingLoc;
    if (State.isMemTerminatorInst(KillingI))
      MaybeKillingLoc = State.getLocForTerminator(KillingI).map(
          [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
    else
      MaybeKillingLoc = State.getLocForWrite(KillingI);

    if (!MaybeKillingLoc) {
      LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
                        << *KillingI << "\n");
      continue;
    }
    MemoryLocation KillingLoc = *MaybeKillingLoc;
    assert(KillingLoc.Ptr && "KillingLoc should not be null");
    const Value *KillingUndObj = getUnderlyingObject(KillingLoc.Ptr);
    LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
                      << *KillingDef << " (" << *KillingI << ")\n");

    unsigned ScanLimit = MemorySSAScanLimit;
    unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
    unsigned PartialLimit = MemorySSAPartialStoreLimit;
    // Worklist of MemoryAccesses that may be killed by KillingDef.
    SetVector<MemoryAccess *> ToCheck;
    ToCheck.insert(KillingDef->getDefiningAccess());

    bool Shortened = false;
    bool IsMemTerm = State.isMemTerminatorInst(KillingI);
    // Check if MemoryAccesses in the worklist are killed by KillingDef.
    for (unsigned I = 0; I < ToCheck.size(); I++) {
      MemoryAccess *Current = ToCheck[I];
      if (State.SkipStores.count(Current))
        continue;

      Optional<MemoryAccess *> MaybeDeadAccess = State.getDomMemoryDef(
          KillingDef, Current, KillingLoc, KillingUndObj, ScanLimit,
          WalkerStepLimit, IsMemTerm, PartialLimit);

      if (!MaybeDeadAccess) {
        LLVM_DEBUG(dbgs() << " finished walk\n");
        continue;
      }

      MemoryAccess *DeadAccess = *MaybeDeadAccess;
      LLVM_DEBUG(dbgs() << " Checking if we can kill " << *DeadAccess);
      if (isa<MemoryPhi>(DeadAccess)) {
        LLVM_DEBUG(dbgs() << "\n ... adding incoming values to worklist\n");
        for (Value *V : cast<MemoryPhi>(DeadAccess)->incoming_values()) {
          MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
          BasicBlock *IncomingBlock = IncomingAccess->getBlock();
          BasicBlock *PhiBlock = DeadAccess->getBlock();

          // We only consider incoming MemoryAccesses that come before the
          // MemoryPhi. Otherwise we could discover candidates that do not
          // strictly dominate our starting def.
          if (State.PostOrderNumbers[IncomingBlock] >
              State.PostOrderNumbers[PhiBlock])
            ToCheck.insert(IncomingAccess);
        }
        continue;
      }
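      // For illustration (hypothetical MemorySSA, printed form abbreviated):
      //
      //   ; bb1: 1 = MemoryDef(liveOnEntry)
      //   ; bb2: 2 = MemoryDef(liveOnEntry)
      //   ; bb3: 3 = MemoryPhi({bb1,1},{bb2,2})
      //
      // Walking through the MemoryPhi above queues both 1 and 2, so the
      // stores on each incoming path are examined (and possibly killed)
      // independently.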
      auto *DeadDefAccess = cast<MemoryDef>(DeadAccess);
      Instruction *DeadI = DeadDefAccess->getMemoryInst();
      LLVM_DEBUG(dbgs() << " (" << *DeadI << ")\n");
      ToCheck.insert(DeadDefAccess->getDefiningAccess());
      NumGetDomMemoryDefPassed++;

      if (!DebugCounter::shouldExecute(MemorySSACounter))
        continue;

      MemoryLocation DeadLoc = *State.getLocForWrite(DeadI);

      if (IsMemTerm) {
        const Value *DeadUndObj = getUnderlyingObject(DeadLoc.Ptr);
        if (KillingUndObj != DeadUndObj)
          continue;
        LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DeadI
                          << "\n KILLER: " << *KillingI << '\n');
        State.deleteDeadInstruction(DeadI);
        ++NumFastStores;
        MadeChange = true;
      } else {
        // Check if DeadI overwrites KillingI.
        int64_t KillingOffset = 0;
        int64_t DeadOffset = 0;
        OverwriteResult OR = State.isOverwrite(
            KillingI, DeadI, KillingLoc, DeadLoc, KillingOffset, DeadOffset);
        if (OR == OW_MaybePartial) {
          auto Iter = State.IOLs.insert(
              std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
                  DeadI->getParent(), InstOverlapIntervalsTy()));
          auto &IOL = Iter.first->second;
          OR = isPartialOverwrite(KillingLoc, DeadLoc, KillingOffset,
                                  DeadOffset, DeadI, IOL);
        }

        if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
          auto *DeadSI = dyn_cast<StoreInst>(DeadI);
          auto *KillingSI = dyn_cast<StoreInst>(KillingI);
          // We are re-using tryToMergePartialOverlappingStores, which requires
          // DeadSI to dominate KillingSI.
          // TODO: implement tryToMergePartialOverlappingStores using
          // MemorySSA.
          if (DeadSI && KillingSI && DT.dominates(DeadSI, KillingSI)) {
            if (Constant *Merged = tryToMergePartialOverlappingStores(
                    KillingSI, DeadSI, KillingOffset, DeadOffset, State.DL,
                    State.BatchAA, &DT)) {

              // Update stored value of earlier store to merged constant.
              DeadSI->setOperand(0, Merged);
              ++NumModifiedStores;
              MadeChange = true;

              Shortened = true;
              // Remove killing store and remove any outstanding overlap
              // intervals for the updated store.
              State.deleteDeadInstruction(KillingSI);
              auto I = State.IOLs.find(DeadSI->getParent());
              if (I != State.IOLs.end())
                I->second.erase(DeadSI);
              break;
            }
          }
        }

        if (OR == OW_Complete) {
          LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DeadI
                            << "\n KILLER: " << *KillingI << '\n');
          State.deleteDeadInstruction(DeadI);
          ++NumFastStores;
          MadeChange = true;
        }
      }
    }

    // Check if the store is a no-op.
    if (!Shortened && State.storeIsNoop(KillingDef, KillingUndObj)) {
      LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *KillingI
                        << '\n');
      State.deleteDeadInstruction(KillingI);
      NumRedundantStores++;
      MadeChange = true;
      continue;
    }

    // Can we form a calloc from a memset/malloc pair?
    if (!Shortened && State.tryFoldIntoCalloc(KillingDef, KillingUndObj)) {
      LLVM_DEBUG(dbgs() << "DSE: Remove memset after forming calloc:\n"
                        << " DEAD: " << *KillingI << '\n');
      State.deleteDeadInstruction(KillingI);
      MadeChange = true;
      continue;
    }
  }

  if (EnablePartialOverwriteTracking)
    for (auto &KV : State.IOLs)
      MadeChange |= State.removePartiallyOverlappedStores(KV.second);

  MadeChange |= State.eliminateRedundantStoresOfExistingValues();
  MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
  return MadeChange;
}
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
  LoopInfo &LI = AM.getResult<LoopAnalysis>(F);

  bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);

#ifdef LLVM_ENABLE_STATS
  if (AreStatisticsEnabled())
    for (auto &I : instructions(F))
      NumRemainingStores += isa<StoreInst>(&I);
#endif

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<MemorySSAAnalysis>();
  PA.preserve<LoopAnalysis>();
  return PA;
}
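// Usage note: with the new pass manager, DSE can be run standalone via
//   opt -passes=dse -S input.ll
// The legacy wrapper below serves the legacy pass manager (`opt -dse`).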
namespace {

/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    const TargetLibraryInfo &TLI =
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
    PostDominatorTree &PDT =
        getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
    LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

    bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);

#ifdef LLVM_ENABLE_STATS
    if (AreStatisticsEnabled())
      for (auto &I : instructions(F))
        NumRemainingStores += isa<StoreInst>(&I);
#endif

    return Changed;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<PostDominatorTreeWrapperPass>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<PostDominatorTreeWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
  }
};

} // end anonymous namespace
char DSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}