1 //===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the visit functions for load, store and alloca.
12 //===----------------------------------------------------------------------===//
14 #include "InstCombineInternal.h"
15 #include "llvm/ADT/MapVector.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/Analysis/Loads.h"
19 #include "llvm/IR/ConstantRange.h"
20 #include "llvm/IR/DataLayout.h"
21 #include "llvm/IR/DebugInfo.h"
22 #include "llvm/IR/IntrinsicInst.h"
23 #include "llvm/IR/LLVMContext.h"
24 #include "llvm/IR/MDBuilder.h"
25 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
26 #include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"
31 STATISTIC(NumDeadStore, "Number of dead stores eliminated");
32 STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
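/// For example (illustrative only): if @g is declared 'constant', then both @g
/// itself and a ConstantExpr GEP into it, such as
///   getelementptr([4 x i32], [4 x i32]* @g, i64 0, i64 2)
/// point to constant-global memory and are accepted here.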
37 static bool pointsToConstantGlobal(Value *V) {
38 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
39 return GV->isConstant();
41 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
42 if (CE->getOpcode() == Instruction::BitCast ||
43 CE->getOpcode() == Instruction::AddrSpaceCast ||
44 CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
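/// For example (illustrative only): an alloca %buf whose only write is a single
///   call @llvm.memcpy(%buf, @cst, <size>, ...)
/// from a constant global @cst, with every other use being a read, lets the
/// caller rewrite all uses of %buf to use @cst directly.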
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
59 SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.
  SmallVector<std::pair<Value *, bool>, 4> ValuesToInspect;
65 ValuesToInspect.emplace_back(V, false);
66 while (!ValuesToInspect.empty()) {
67 auto ValuePair = ValuesToInspect.pop_back_val();
68 const bool IsOffset = ValuePair.second;
69 for (auto &U : ValuePair.first->uses()) {
70 auto *I = cast<Instruction>(U.getUser());
72 if (auto *LI = dyn_cast<LoadInst>(I)) {
73 // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }
78 if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
79 // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
83 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }
      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }
116 // Lifetime intrinsics can be handled by the caller.
117 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
118 if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
119 II->getIntrinsicID() == Intrinsic::lifetime_end) {
120 assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }
      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;
      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
134 if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }
139 // If we already have seen a copy, reject the second one.
140 if (TheCopy) return false;
      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
144 if (IsOffset) return false;
146 // If the memintrinsic isn't using the alloca as the dest, reject it.
147 if (U.getOperandNo() != 0) return false;
149 // If the source of the memcpy/move is not a constant global, reject it.
150 if (!pointsToConstantGlobal(MI->getSource()))
      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}
/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global. If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
163 static MemTransferInst *
164 isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
165 SmallVectorImpl<Instruction *> &ToDelete) {
166 MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}
/// Returns true if V is dereferenceable for the size of the alloca.
173 static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
174 const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (AllocaSize == 0)
    return false;
  return isDereferenceableAndAlignedPointer(V, AI->getAlignment(),
                                            APInt(64, AllocaSize), DL);
}
184 static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
185 // Check for array size of 1 (scalar allocation).
186 if (!AI.isArrayAllocation()) {
187 // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder.getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }
197 // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
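  // For example (illustrative): "%a = alloca i32, i32 4" becomes
  // "%a1 = alloca [4 x i32]" followed by an all-zero inbounds GEP (named with a
  // ".sub" suffix) that replaces the original pointer.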
198 if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
199 Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
200 AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
201 New->setAlignment(AI.getAlignment());
203 // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible...also skip interleaved debug info
206 BasicBlock::iterator It(New);
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;
    // Now that I is pointing to the first non-allocation-inst in the block,
    // insert our getelementptr instruction...
213 Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
214 Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
218 IC.InsertNewInstBefore(GEP, *It);
    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.replaceInstUsesWith(AI, GEP);
  }
225 if (isa<UndefValue>(AI.getArraySize()))
226 return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
230 Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
231 if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}
// If I and V are pointers to different address spaces, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on the target.
//
246 // This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees bitcast or GEP, it will
// create new bitcast or GEP with the new pointer and use them in the load
// instruction.
namespace {
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
265 } // end anonymous namespace
267 void PointerReplacer::findLoadAndReplace(Instruction &I) {
268 for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
272 DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
277 } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
278 Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}
287 Value *PointerReplacer::getReplacement(Value *V) {
288 auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}
294 void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;
298 if (auto *LT = dyn_cast<LoadInst>(I)) {
299 auto *V = getReplacement(LT->getPointerOperand());
300 assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(V);
    NewI->takeName(LT);
303 IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
306 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
307 auto *V = getReplacement(GEP->getPointerOperand());
308 assert(V && "Operand not replaced");
309 SmallVector<Value *, 8> Indices;
310 Indices.append(GEP->idx_begin(), GEP->idx_end());
311 auto *NewI = GetElementPtrInst::Create(
312 V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
316 } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
317 auto *V = getReplacement(BC->getOperand(0));
318 assert(V && "Operand not replaced");
319 auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
320 V->getType()->getPointerAddressSpace());
321 auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}
void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}
341 Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;
345 if (AI.getAllocatedType()->isSized()) {
346 // If the alignment is 0 (unspecified), assign it the preferred alignment.
347 if (AI.getAlignment() == 0)
348 AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));
350 // Move all alloca's of zero byte objects to the entry block and merge them
    // together. Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
353 if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
354 // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
357 if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }
362 // Get the first instruction in the entry block.
363 BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
364 Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
365 if (FirstInst != &AI) {
366 // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
369 AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
370 if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
371 DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }
376 // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
378 if (EntryAI->getAlignment() == 0)
379 EntryAI->setAlignment(
380 DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
387 if (AI.getType() != EntryAI->getType())
388 return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }
394 if (AI.getAlignment()) {
395 // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
401 SmallVector<Instruction *, 4> ToDelete;
402 if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
403 unsigned SourceAlign = getOrEnforceKnownAlignment(
404 Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
405 if (AI.getAlignment() <= SourceAlign &&
406 isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
407 DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
408 DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
409 for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
410 eraseInstFromFunction(*ToDelete[i]);
411 Constant *TheSrc = cast<Constant>(Copy->getSource());
412 auto *SrcTy = TheSrc->getType();
413 auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
414 SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
417 if (AI.getType()->getPointerAddressSpace() ==
418 SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        }
424 PointerReplacer PtrReplacer(*this);
        PtrReplacer.replacePointer(AI, Cast);
        ++NumGlobalCopies;
      }
    }
  }
  // At last, use the generic allocation site handler to aggressively remove
  // allocations.
  return visitAllocSite(AI);
}
// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntegerTy() || Ty->isPointerTy() || Ty->isFloatingPointTy();
}
442 /// \brief Helper to combine a load to a new type.
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
451 static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
452 const Twine &Suffix = "") {
453 assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
454 "can't fold an atomic load to requested type");
456 Value *Ptr = LI.getPointerOperand();
457 unsigned AS = LI.getPointerAddressSpace();
458 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
459 LI.getAllMetadata(MD);
461 LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
462 IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
463 LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
464 NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
465 MDBuilder MDB(NewLoad->getContext());
466 for (const auto &MDPair : MD) {
467 unsigned ID = MDPair.first;
468 MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
478 case LLVMContext::MD_tbaa:
479 case LLVMContext::MD_prof:
480 case LLVMContext::MD_fpmath:
481 case LLVMContext::MD_tbaa_struct:
482 case LLVMContext::MD_invariant_load:
483 case LLVMContext::MD_alias_scope:
484 case LLVMContext::MD_noalias:
485 case LLVMContext::MD_nontemporal:
486 case LLVMContext::MD_mem_parallel_loop_access:
487 // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;
491 case LLVMContext::MD_nonnull:
      copyNonnullMetadata(LI, N, *NewLoad);
      break;
494 case LLVMContext::MD_align:
495 case LLVMContext::MD_dereferenceable:
496 case LLVMContext::MD_dereferenceable_or_null:
497 // These only directly apply if the new type is also a pointer.
498 if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
501 case LLVMContext::MD_range:
      copyRangeMetadata(IC.getDataLayout(), LI, N, *NewLoad);
      break;
    }
  }
  return NewLoad;
}
509 /// \brief Combine a store to a new type.
511 /// Returns the newly created store instruction.
512 static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
513 assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
514 "can't fold an atomic store of requested type");
516 Value *Ptr = SI.getPointerOperand();
517 unsigned AS = SI.getPointerAddressSpace();
518 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
519 SI.getAllMetadata(MD);
521 StoreInst *NewStore = IC.Builder.CreateAlignedStore(
522 V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
523 SI.getAlignment(), SI.isVolatile());
524 NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
525 for (const auto &MDPair : MD) {
526 unsigned ID = MDPair.first;
527 MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
538 case LLVMContext::MD_tbaa:
539 case LLVMContext::MD_prof:
540 case LLVMContext::MD_fpmath:
541 case LLVMContext::MD_tbaa_struct:
542 case LLVMContext::MD_alias_scope:
543 case LLVMContext::MD_noalias:
544 case LLVMContext::MD_nontemporal:
545 case LLVMContext::MD_mem_parallel_loop_access:
546 // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
550 case LLVMContext::MD_invariant_load:
551 case LLVMContext::MD_nonnull:
552 case LLVMContext::MD_range:
553 case LLVMContext::MD_align:
554 case LLVMContext::MD_dereferenceable:
555 case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}
/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
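/// For example (illustrative IR, assuming 64-bit pointers so the cast is a
/// no-op):
///   %iv = load i64, i64* %p
///   %pv = inttoptr i64 %iv to i8*
/// is better modelled as
///   %pp = bitcast i64* %p to i8**
///   %pv = load i8*, i8** %pp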
581 static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
582 // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;
590 // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;
594 Type *Ty = LI.getType();
595 const DataLayout &DL = IC.getDataLayout();
597 // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
601 if (!Ty->isIntegerTy() && Ty->isSized() &&
602 DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
603 DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
604 !DL.isNonIntegralPointerType(Ty)) {
605 if (all_of(LI.users(), [&LI](User *U) {
606 auto *SI = dyn_cast<StoreInst>(U);
607 return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
613 // Replace all the stores with stores of the newly loaded value.
614 for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
615 auto *SI = cast<StoreInst>(*UI++);
616 IC.Builder.SetInsertPoint(SI);
617 combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }
626 // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto* CI = dyn_cast<CastInst>(LI.user_back()))
632 if (CI->isNoopCast(DL))
633 if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
634 LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
635 CI->replaceAllUsesWith(NewLoad);
        IC.eraseInstFromFunction(*CI);
        return &LI;
      }
  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}
645 static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
646 // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;
651 Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;
655 StringRef Name = LI.getName();
656 assert(LI.getAlignment() && "Alignment must be set at this point");
658 if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
660 auto NumElements = ST->getNumElements();
661 if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
666 NewLoad->setAAMetadata(AAMD);
667 return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }
    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
673 const DataLayout &DL = IC.getDataLayout();
674 auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);
682 auto *Addr = LI.getPointerOperand();
683 auto *IdxType = Type::getInt32Ty(T->getContext());
684 auto *Zero = ConstantInt::get(IdxType, 0);
686 Value *V = UndefValue::get(T);
687 for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
694 auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
695 auto *L = IC.Builder.CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
696 // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
699 L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }
707 if (auto *AT = dyn_cast<ArrayType>(T)) {
708 auto *ET = AT->getElementType();
709 auto NumElements = AT->getNumElements();
710 if (NumElements == 1) {
711 LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
714 NewLoad->setAAMetadata(AAMD);
715 return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }
719 // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;
726 const DataLayout &DL = IC.getDataLayout();
727 auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);
732 auto *Addr = LI.getPointerOperand();
733 auto *IdxType = Type::getInt64Ty(T->getContext());
734 auto *Zero = ConstantInt::get(IdxType, 0);
    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
738 for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                             Name + ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
749 L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}
761 // If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (constant global values and allocas fall into this category).
//
766 // FIXME: This should probably live in ValueTracking (or similar).
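// For example (illustrative): with a typical 64-bit DataLayout, an
// "alloca [4 x i32]" occupies 4 * 4 = 16 bytes, so it satisfies this predicate
// for any MaxSize >= 16.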
767 static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
768 const DataLayout &DL) {
769 SmallPtrSet<Value *, 4> Visited;
770 SmallVector<Value *, 4> Worklist(1, V);
  do {
    Value *P = Worklist.pop_back_val();
774 P = P->stripPointerCasts();
    if (!Visited.insert(P).second)
      continue;
779 if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
780 Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }
785 if (PHINode *PN = dyn_cast<PHINode>(P)) {
786 for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }
791 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }
798 // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
800 if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;
808 uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as an
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }
816 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;
820 uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}
// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
//   @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
//   ...
//   %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
//   ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. We could also
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
847 static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
848 Instruction *MemI, unsigned &Idx) {
849 if (GEPI->getNumOperands() < 2)
852 // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };
868 // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
870 Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;
876 SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
881 const DataLayout &DL = IC.getDataLayout();
882 uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);
884 // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
888 auto IsAllNonNegative = [&]() {
889 for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
890 KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };
899 // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;
907 // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}
913 // If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
916 template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
921 if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
922 Instruction *NewGEPI = GEPI->clone();
923 NewGEPI->setOperand(Idx,
924 ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
925 NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}
934 static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
935 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
936 const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0)
      return true;
  }
940 if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0))
    return true;
  return false;
}
946 Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
947 Value *Op = LI.getOperand(0);
949 // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;
953 // Attempt to improve the alignment.
954 unsigned KnownAlign = getOrEnforceKnownAlignment(
955 Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
956 unsigned LoadAlign = LI.getAlignment();
957 unsigned EffectiveLoadAlign =
958 LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());
960 if (KnownAlign > EffectiveLoadAlign)
961 LI.setAlignment(KnownAlign);
962 else if (LoadAlign == 0)
963 LI.setAlignment(EffectiveLoadAlign);
965 // Replace GEP indices if possible.
966 if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }
  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;
974 // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
977 BasicBlock::iterator BBI(LI);
978 bool IsLoadCSE = false;
979 if (Value *AvailableVal = FindAvailableLoadedValue(
980 &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI);
984 return replaceInstUsesWith(
985 LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }
989 // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
991 if (!LI.isUnordered()) return nullptr;
993 // load(gep null, ...) -> unreachable
994 // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
996 if (canSimplifyNullLoadOrGEP(LI, Op)) {
997 // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable. We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
1001 StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()),
1002 Constant::getNullValue(Op->getType()), &LI);
1003 SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }
1007 if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
1018 if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
1019 // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
1020 unsigned Align = LI.getAlignment();
1021 if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
1022 isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
1023 LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1),
1024 SI->getOperand(1)->getName()+".val");
1025 LoadInst *V2 = Builder.CreateLoad(SI->getOperand(2),
1026 SI->getOperand(2)->getName()+".val");
1027 assert(LI.isUnordered() && "implied by above");
1028 V1->setAlignment(Align);
1029 V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
1030 V2->setAlignment(Align);
1031 V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }
1035 // load (select (cond, null, P)) -> load P
1036 if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
1037 LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }
1042 // load (select (cond, P, null)) -> load P
1043 if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
1044 LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}
1053 /// \brief Look for extractelement/insertvalue sequence that acts like a bitcast.
1055 /// \returns underlying value that was "cast", or nullptr otherwise.
1057 /// For example, if we have:
///  %E0 = extractelement <2 x double> %U, i32 0
///  %V0 = insertvalue [2 x double] undef, double %E0, 0
///  %E1 = extractelement <2 x double> %U, i32 1
///  %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;
1086 auto *UT = cast<VectorType>(U->getType());
1087 auto *VT = V->getType();
1088 // Check that types UT and VT are bitwise isomorphic.
1089 const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
1093 if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
1097 auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
1100 for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}
/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
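/// For example (illustrative IR):
///   %f = bitcast i64 %x to double
///   store double %f, double* %p
/// is better modelled as
///   %pc = bitcast double* %p to i64*
///   store i64 %x, i64* %pc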
1128 static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
1129 // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;
1134 // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;
1138 Value *V = SI.getValueOperand();
1140 // Fold away bit casts of the stored value by storing the original type.
1141 if (auto *BC = dyn_cast<BitCastInst>(V)) {
1142 V = BC->getOperand(0);
1143 if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }
1149 if (Value *U = likeBitCastFromVector(IC, V))
1150 if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }
  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}
1160 static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
1161 // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;
1166 Value *V = SI.getValueOperand();
1167 Type *T = V->getType();
  if (!T->isAggregateType())
    return false;
1172 if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
1183 const DataLayout &DL = IC.getDataLayout();
1184 auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);
    SmallString<16> EltName = V->getName();
    EltName += ".elt";
1194 auto *Addr = SI.getPointerOperand();
1195 SmallString<16> AddrName = Addr->getName();
1196 AddrName += ".repack";
1198 auto *IdxType = Type::getInt32Ty(ST->getContext());
1199 auto *Zero = ConstantInt::get(IdxType, 0);
1200 for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
1207 auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
1208 auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
1209 llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }
1218 if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
1220 auto NumElements = AT->getNumElements();
1221 if (NumElements == 1) {
1222 V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;
1234 const DataLayout &DL = IC.getDataLayout();
1235 auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);
    SmallString<16> EltName = V->getName();
    EltName += ".elt";
1242 auto *Addr = SI.getPointerOperand();
1243 SmallString<16> AddrName = Addr->getName();
1244 AddrName += ".repack";
1246 auto *IdxType = Type::getInt64Ty(T->getContext());
1247 auto *Zero = ConstantInt::get(IdxType, 0);
1249 uint64_t Offset = 0;
1250 for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
1257 auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
1258 auto EltAlign = MinAlign(Align, Offset);
1259 Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
1280 static bool equivalentAddressValues(Value *A, Value *B) {
1281 // Test if the values are trivially equivalent.
1282 if (A == B) return true;
  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;
  // Otherwise they may not be equivalent.
  return false;
}
1301 Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
1302 Value *Val = SI.getOperand(0);
1303 Value *Ptr = SI.getOperand(1);
1305 // Try to canonicalize the stored type.
1306 if (combineStoreToValueType(*this, SI))
1307 return eraseInstFromFunction(SI);
1309 // Attempt to improve the alignment.
1310 unsigned KnownAlign = getOrEnforceKnownAlignment(
1311 Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
1312 unsigned StoreAlign = SI.getAlignment();
1313 unsigned EffectiveStoreAlign =
1314 StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());
1316 if (KnownAlign > EffectiveStoreAlign)
1317 SI.setAlignment(KnownAlign);
1318 else if (StoreAlign == 0)
1319 SI.setAlignment(EffectiveStoreAlign);
1321 // Try to canonicalize the stored type.
1322 if (unpackStoreToAggregate(*this, SI))
1323 return eraseInstFromFunction(SI);
1325 // Replace GEP indices if possible.
1326 if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }
1331 // Don't hack volatile/ordered stores.
1332 // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
1333 if (!SI.isUnordered()) return nullptr;
  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
1337 if (Ptr->hasOneUse()) {
1338 if (isa<AllocaInst>(Ptr))
1339 return eraseInstFromFunction(SI);
1340 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1341 if (isa<AllocaInst>(GEP->getOperand(0))) {
1342 if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }
1348 // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
1351 BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
1355 // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
1357 if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }
1363 if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
1364 // Prev store isn't volatile, and stores to the same location?
1365 if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
1366 SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }
1375 // If this is a load, we have to stop. However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
1378 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
1379 if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
1380 assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }
1389 // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }
1394 // store X, null -> turns into 'unreachable' in SimplifyCFG
1395 if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
1396 if (!isa<UndefValue>(Val)) {
1397 SI.setOperand(0, UndefValue::get(Val->getType()));
1398 if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }
1404 // store undef, Ptr -> noop
1405 if (isa<UndefValue>(Val))
1406 return eraseInstFromFunction(SI);
1408 // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
1415 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
1416 if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
1417 if (BI->isUnconditional())
1418 if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr; // xform done!

  return nullptr;
}
/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
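///
/// For example (illustrative IR), with two predecessors both storing to %p:
///   then:  store i32 %v1, i32* %p
///   else:  store i32 %v2, i32* %p
/// becomes, in the common successor:
///   %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///   store i32 %storemerge, i32* %p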
1432 bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
1433 assert(SI.isUnordered() &&
         "this code has not been audited for volatile or ordered store case");
1436 BasicBlock *StoreBB = SI.getParent();
1438 // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
1441 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
1443 // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
1445 pred_iterator PI = pred_begin(DestBB);
1446 BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;
  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;
1464 // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;
1469 // Verify that the other block ends in a branch and is not otherwise empty.
1470 BasicBlock::iterator BBI(OtherBB->getTerminator());
1471 BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;
1475 // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
1477 StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
1481 while (isa<DbgInfoIntrinsic>(BBI) ||
1482 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
1487 // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
1489 OtherStore = dyn_cast<StoreInst>(BBI);
1490 if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
1494 // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
1496 if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;
1500 // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
1504 // Check to see if we find the matching store.
1505 if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
1506 if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
1511 // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
1513 if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }
1518 // In order to eliminate the store in OtherBr, we have to
  // make sure nothing reads or overwrites the stored value in
  // StoreBB.
1521 for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
1522 // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }
1528 // Insert a PHI node now if we need it.
1529 Value *MergedVal = OtherStore->getOperand(0);
1530 if (MergedVal != SI.getOperand(0)) {
1531 PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
1532 PN->addIncoming(SI.getOperand(0), SI.getParent());
1533 PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }
  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSyncScopeID());
1545 InsertNewInstBefore(NewSI, *BBI);
1546 // The debug locations of the original instructions might differ; merge them.
1547 NewSI->setDebugLoc(DILocation::getMergedLocation(SI.getDebugLoc(),
1548 OtherStore->getDebugLoc()));
  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }
1558 // Nuke the old stores.
1559 eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}