//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

15 #include "llvm/ADT/DenseSet.h"
16 #include "llvm/ADT/iterator_range.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/Analysis/AssumptionCache.h"
21 #include "llvm/Analysis/GlobalsModRef.h"
22 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/Analysis/TargetLibraryInfo.h"
25 #include "llvm/Analysis/ValueTracking.h"
26 #include "llvm/IR/Argument.h"
27 #include "llvm/IR/Constants.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Dominators.h"
31 #include "llvm/IR/Function.h"
32 #include "llvm/IR/GetElementPtrTypeIterator.h"
33 #include "llvm/IR/GlobalVariable.h"
34 #include "llvm/IR/InstrTypes.h"
35 #include "llvm/IR/Instruction.h"
36 #include "llvm/IR/Instructions.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/Intrinsics.h"
39 #include "llvm/IR/IRBuilder.h"
40 #include "llvm/IR/LLVMContext.h"
41 #include "llvm/IR/Module.h"
42 #include "llvm/IR/Operator.h"
43 #include "llvm/IR/Type.h"
44 #include "llvm/IR/User.h"
45 #include "llvm/IR/Value.h"
46 #include "llvm/Pass.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/Debug.h"
49 #include "llvm/Support/MathExtras.h"
50 #include "llvm/Support/raw_ostream.h"
51 #include "llvm/Transforms/Scalar.h"
52 #include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
53 #include "llvm/Transforms/Utils/Local.h"
60 #define DEBUG_TYPE "memcpyopt"
STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

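/// Compute the byte offset implied by the trailing GEP operands
/// [Idx, NumOperands).  If any of those indices is not a compile-time
/// constant, \p VariableIdxFound is set and the returned offset is
/// meaningless.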
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size * OpC->getSExtValue();
  }

  return Offset;
}

/// Return true if Ptr1 is provably equal to Ptr2 plus a constant offset, and
/// return that constant offset. For example, Ptr1 might be &A[42], and Ptr2
/// might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base.  After that base, they may have some number of common
  // (and potentially variable) indices.  After that they may differ by some
  // constant offset, which determines their offset from each other.  At this
  // point, we handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2 - Offset1;
  return true;
}

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [3, 4).  The fourth store joins the
/// two ranges into [0, 4) which is memset'able.
struct MemsetRange {
  // Start/End - A semi range that describes the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found more than 4 stores to merge or 16 bytes, use memset.
  if (TheStores.size() >= 4 || End - Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size.  If so, check to see whether we will end up actually reducing the
  // number of stores used.
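  //
  // For example, with a largest legal integer of 32 bits (MaxIntSize == 4
  // bytes), covering a 7-byte range costs NumPointerStores = 1 plus
  // NumByteStores = 3, i.e. 4 stores, so merging is only considered
  // profitable when it replaces more than 4 individual stores.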
  unsigned Bytes = unsigned(End - Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores + NumByteStores;
}

namespace {

class MemsetRanges {
  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;
  typedef SmallVectorImpl<MemsetRange>::iterator range_iterator;
  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  typedef SmallVectorImpl<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start + Size;

  range_iterator I = std::lower_bound(Ranges.begin(), Ranges.end(), Start,
    [](const MemsetRange &LHS, int64_t RHS) { return LHS.End < RHS; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = Ptr;
    R.Alignment    = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if the range extends the start of the range.  In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out
  // to End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator and post-dominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

char MemCpyOptLegacyPass::ID = 0;

} // end anonymous namespace

/// The public interface to this file.
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
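///
/// Conceptually, at the C level this turns:
///   p[0] = 0; p[1] = 0; p[2] = 0; p[3] = 0;
/// into a single:
///   memset(p, 0, 4);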
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store that can be splatable.  Scan to find
  // all subsequent stores of the same value to offset from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put it right before the first instruction that
  // isn't part of the memset block.  This ensures that the memset is dominated
  // by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
          cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
                                   Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (Instruction *SI : Range.TheStores)
            dbgs() << *SI << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores) {
      MD->removeInstruction(SI);
      SI->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}

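/// Compute the common alignment of a load/store pair, falling back to the
/// ABI alignment of the accessed type for an instruction that carries no
/// explicit alignment.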
static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
                                    const LoadInst *LI) {
  unsigned StoreAlign = SI->getAlignment();
  if (!StoreAlign)
    StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
  unsigned LoadAlign = LI->getAlignment();
  if (!LoadAlign)
    LoadAlign = DL.getABITypeAlignment(LI->getType());

  return std::min(StoreAlign, LoadAlign);
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its arguments, plus anything else that
// may alias with them.
// The method returns true if it was successful.
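//
// Note that lifting the store can require lifting instructions it depends
// on: e.g. if the store's address is a GEP computed between P and SI, that
// GEP must be lifted too, which is what the Args set below tracks.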
static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
                   const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (AA.getModRefInfo(P, StoreLoc) != MRI_NoModRef)
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction*, 8> ToLift;

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<ImmutableCallSite, 8> CallSites;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    bool MayAlias = AA.getModRefInfo(C) != MRI_NoModRef;

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
        return AA.getModRefInfo(C, ML);
      });

      if (!NeedLift)
        NeedLift =
            llvm::any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
              return AA.getModRefInfo(C, CS);
            });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (AA.getModRefInfo(C, LoadLoc) & MRI_Mod)
        return false;
      else if (auto CS = ImmutableCallSite(C)) {
        // If we can't lift this before P, it's game over.
        if (AA.getModRefInfo(P, CS) != MRI_NoModRef)
          return false;

        CallSites.push_back(CS);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (AA.getModRefInfo(P, ML) != MRI_NoModRef)
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k)))
        if (A->getParent() == SI->getParent())
          Args.insert(A);
  }

  // We made it; perform the lifting.
  for (auto *I : llvm::reverse(ToLift)) {
    DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
  }

  return true;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach how to propagate the !nontemporal metadata to
  // memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store.  If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (AA.getModRefInfo(&I, LoadLoc) & MRI_Mod) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(AA, SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantic.  If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          unsigned Align = findCommonAlignment(DL, SI, LI);
          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(SI->getPointerOperand(),
                                      LI->getPointerOperand(), Size,
                                      Align, SI->isVolatile());
          else
            M = Builder.CreateMemCpy(SI->getPointerOperand(),
                                     LI->getPointerOperand(), Size,
                                     Align, SI->isVolatile());

          DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI
                       << " => " << *M << "\n");

          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
        bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
             I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
            C = nullptr;
            break;
          }
          // The store to dest may never happen if an exception can be thrown
          // between the load and the store.
          if (I->mayThrow() && !CpyDestIsLocal) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            findCommonAlignment(DL, SI, LI), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'able a
  // byte at a time like "0" or "-1" or any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      unsigned Align = SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal,
                                     Size, Align, SI->isVolatile());

      DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      MD->removeInstruction(SI);
      SI->eraseFromParent();
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         unsigned cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // The store to dest may never happen if the call can throw.
    if (C->mayThrow())
      return false;

    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
          IT->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = LookupDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = LookupAliasAnalysis();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != MRI_NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != MRI_NoModRef)
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
        CS.getArgument(i)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(
                              Dest, CS.getArgument(i)->getType(),
                              Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group};
  combineMetadata(C, cpy, KnownIDs);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'.  Try to simplify M to copy from MDep's input if we can.
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction.  Just
  // ignore the input and let someone else zap MDep.  This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = LookupAliasAnalysis();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   M->getIterator(), M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  //
  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet.  Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
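///
/// For example, with dst_size == 16 and src_size == 8 the trailing memset
/// becomes memset(dst + 8, c, 8); when dst_size <= src_size the length
/// select below yields 0 and later passes delete the degenerate memset.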
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that there are no other dependencies on the memset destination.
  MemDepResult DstDepInfo =
      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
                                   MemCpy->getIterator(), MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const unsigned DestAlign =
      std::max(MemSet->getAlignment(), MemCpy->getAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
  Builder.CreateMemSet(Builder.CreateGEP(Dest, SrcSize), MemSet->getOperand(1),
                       MemsetLen, Align);

  MD->removeInstruction(MemSet);
  MemSet->eraseFromParent();
  return true;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// When dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  // Make sure we really have the pattern memcpy(..., memset(...), ...); that
  // is, we must be memcpying from exactly the address the memset wrote.
  // Otherwise it is hard to reason about.
  if (!AA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  if (!MemSetSize || CopySize->getZExtValue() > MemSetSize->getZExtValue())
    return false;

  IRBuilder<> Builder(MemCpy);
  Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                       CopySize, MemCpy->getAlignment());
  return true;
}

/// Perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset.  We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundance for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started its
  //      lifetime copies undefined data, and we can therefore eliminate the
  //      memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
      SrcLoc, true, M->getIterator(), M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    Instruction *I = SrcDepInfo.getInst();
    bool hasUndefContents = false;

    if (isa<AllocaInst>(I)) {
      hasUndefContents = true;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start)
        if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
          if (LTSize->getZExtValue() >= CopySize->getZExtValue())
            hasUndefContents = true;
    }

    if (hasUndefContents) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  if (!TLI->has(LibFunc_memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
               << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
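///
/// If the byval argument is fed by a memcpy from a temporary, e.g.
///    memcpy(tmp <- src)
///    call @foo(byval tmp)
/// we can often pass src directly instead, making the temporary copy dead.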
bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
  const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, ByValSize), true,
      CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger or equal to the size of the byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need.  If we fail, we bail out.
  AssumptionCache &AC = LookupAssumptionCache();
  DominatorTree &DT = LookupDomTree();
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
                                 CS.getInstruction(), &AC, &DT) < ByValAlign)
    return false;

  // The address space of the memcpy source must match the byval argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false,
      CS.getInstruction()->getIterator(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOptPass.
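///
/// Returns true if any change was made.  runImpl keeps re-invoking this until
/// a full sweep makes no change, since one rewrite (e.g. a newly inferred
/// memset) can expose further opportunities.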
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto CS = CallSite(I)) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);

  auto LookupAliasAnalysis = [&]() -> AliasAnalysis & {
    return AM.getResult<AAManager>(F);
  };
  auto LookupAssumptionCache = [&]() -> AssumptionCache & {
    return AM.getResult<AssumptionAnalysis>(F);
  };
  auto LookupDomTree = [&]() -> DominatorTree & {
    return AM.getResult<DominatorTreeAnalysis>(F);
  };

  bool MadeChange = runImpl(F, &MD, &TLI, LookupAliasAnalysis,
                            LookupAssumptionCache, LookupDomTree);
  if (!MadeChange)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(
    Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_,
    std::function<AliasAnalysis &()> LookupAliasAnalysis_,
    std::function<AssumptionCache &()> LookupAssumptionCache_,
    std::function<DominatorTree &()> LookupDomTree_) {
  bool MadeChange = false;
  MD = MD_;
  TLI = TLI_;
  LookupAliasAnalysis = std::move(LookupAliasAnalysis_);
  LookupAssumptionCache = std::move(LookupAssumptionCache_);
  LookupDomTree = std::move(LookupDomTree_);

  // If we don't have at least memset and memcpy, there is little point of doing
  // anything here.  These are required by a freestanding implementation, so if
  // even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
    return false;

  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = nullptr;
  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  auto LookupAliasAnalysis = [this]() -> AliasAnalysis & {
    return getAnalysis<AAResultsWrapperPass>().getAAResults();
  };
  auto LookupAssumptionCache = [this, &F]() -> AssumptionCache & {
    return getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  };
  auto LookupDomTree = [this]() -> DominatorTree & {
    return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  };

  return Impl.runImpl(F, MD, TLI, LookupAliasAnalysis, LookupAssumptionCache,
                      LookupDomTree);
}