//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}
/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global. If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}
/// Returns true if V is dereferenceable for size of alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, AI->getAlignment(),
                                            APInt(64, AllocaSize), DL);
}
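
/// Canonicalize the array size of an alloca: give scalar allocations the
/// canonical i32 1 array size, fold a constant array size into the allocated
/// type, and otherwise cast the array size to intptr_t so any casting is
/// exposed to later combines.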
static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder.getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible...also skip interleaved debug info
    //
    BasicBlock::iterator It(New);
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that I is pointing to the first non-allocation-inst in the block,
    // insert our getelementptr instruction...
    //
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.replaceInstUsesWith(AI, GEP);
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}
namespace {
// If I and V are pointers in different address space, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees bitcast or GEP, it will
// create new bitcast or GEP with the new pointer and use them in the load
// instruction.
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace
void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}

Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(V);
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}
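
// Visit an alloca: canonicalize its array size and alignment, merge
// zero-sized allocas into the entry block, and replace an alloca that is only
// ever initialized by a copy from a constant global with that global.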
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together. Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        } else {
          PointerReplacer PtrReplacer(*this);
          PtrReplacer.replacePointer(AI, Cast);
          ++NumGlobalCopies;
        }
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}
// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntegerTy() || Ty->isPointerTy() || Ty->isFloatingPointTy();
}
/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
                                      const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
      IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      copyNonnullMetadata(LI, N, *NewLoad);
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      copyRangeMetadata(IC.getDataLayout(), LI, N, *NewLoad);
      break;
    }
  }
  return NewLoad;
}
/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}
/// Returns true if instruction represent minmax pattern like:
///   select ((cmp load V1, load V2), V1, V2).
static bool isMinMaxWithLoads(Value *V) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}
/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  // Do not perform canonicalization if minmax pattern is found (to avoid
  // infinite loop).
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
      !DL.isNonIntegralPointerType(Ty) &&
      !isMinMaxWithLoads(
          peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true))) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder.SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}
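
/// Unpack a load of a struct or array aggregate into loads of its individual
/// elements, rebuilt with insertvalue, when that can be done without losing
/// padding information or blowing up compile time.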
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                             Name + ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}
// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (constant global values and allocas fall into this category).
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as an
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}
// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// also search through non-zero constant indices if we kept track of the
// offsets those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}
// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}
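
// Returns true if the store writes through a (GEP of a) null pointer in
// address space 0; such stores become 'unreachable' in SimplifyCFG.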
static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (SI.getPointerAddressSpace() != 0)
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return isa<ConstantPointerNull>(Ptr);
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0)
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0))
    return true;
  return false;
}
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable. We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
        LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1),
                                          SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder.CreateLoad(SI->getOperand(2),
                                          SI->getOperand(2)->getName()+".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Align);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Align);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}
/// \brief Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///     %E0 = extractelement <2 x double> %U, i32 0
///     %V0 = insertvalue [2 x double] undef, double %E0, 0
///     %E1 = extractelement <2 x double> %U, i32 1
///     %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}
/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}
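
/// Unpack a store of a struct or array aggregate into stores of its
/// individual elements, mirroring unpackLoadToAggregate above.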
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where select is minmax:
/// select ((cmp load V1, load V2), V1, V2).
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombiner &IC,
                                                StoreInst &SI) {
  // bitcast?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return false;
  // load? integer?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  if (!isMinMaxWithLoads(LoadAddr))
    return false;

  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               peekThroughBitcast(SI->getPointerOperand()) != LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = combineLoadToNewType(
      IC, *LI, LoadAddr->getType()->getPointerElementType());
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  IC.replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
  IC.eraseInstFromFunction(*LI);
  return true;
}
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Try to canonicalize the stored type.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
                                                           SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U); // Dropped a use.
    }
    return nullptr; // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr; // xform done!

  return nullptr;
}
/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "this code has not been audited for volatile or ordered store case");

  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // if so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop)
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI==OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  // The debug locations of the original instructions might differ; merge them.
  NewSI->applyMergedLocation(SI.getDebugLoc(), OtherStore->getDebugLoc());

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);

  return true;
}