//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }

  return false;
}
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, and return false if
/// we see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses. If we see a memcpy/memmove that targets an unoffset pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
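///
/// For example, in IR shaped like the following (an illustrative sketch; the
/// value names are hypothetical):
///
///   @G = constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
///   ...
///   %A = alloca [4 x i32]
///   %dst = bitcast [4 x i32]* %A to i8*
///   %src = bitcast [4 x i32]* @G to i8*
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 16, i1 false)
///
/// if every other use of %A is a read, the alloca can be replaced by @G.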
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }
      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(I);
        continue;
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}
/// isOnlyCopiedFromConstantGlobal - Return the copying memcpy/memmove if the
/// specified alloca is only modified by such a copy from a constant global,
/// or null otherwise. If we can prove this, we can replace any uses of the
/// alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}
/// Returns true if V is dereferenceable for the size of the alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (AllocaSize == 0)
    return false;
  return isDereferenceableAndAlignedPointer(V, AI->getAlignment(),
                                            APInt(64, AllocaSize), DL);
}
static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder.getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
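  // E.g. (an illustrative sketch; the value names are hypothetical):
  //   %buf = alloca i32, i32 4
  // becomes
  //   %buf1 = alloca [4 x i32]
  //   %buf1.sub = getelementptr inbounds [4 x i32], [4 x i32]* %buf1, i64 0, i64 0
  // and all uses of %buf are rewritten to use %buf1.sub.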
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    if (C->getValue().getActiveBits() <= 64) {
      Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It(New);
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
        ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = {NullIdx, NullIdx};
      Instruction *GEP =
          GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
      IC.InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return IC.replaceInstUsesWith(AI, GEP);
    }
  }
  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}
namespace {
// If I and V are pointers in different address spaces, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on the target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees bitcast or GEP, it will
// create new bitcast or GEP with the new pointer and use them in the load
// instruction.
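//
// For example (an illustrative scenario, not tied to a specific target): if an
// alloca in addrspace(0) is only initialized by a memcpy from a global in
// addrspace(4), loads of the alloca can be rewritten to load from the global
// through cloned GEPs/bitcasts whose pointer type lives in addrspace(4),
// avoiding any addrspacecast.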
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;

  InstCombiner &IC;
};
} // end anonymous namespace
void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    LLVM_DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}
Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}
void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(V);
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}
void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero byte objects to the entry block and merge them
    // together. Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }
  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        LLVM_DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        }

        PointerReplacer PtrReplacer(*this);
        PtrReplacer.replacePointer(AI, Cast);
        ++NumGlobalCopies;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // everything else.
  return visitAllocSite(AI);
}
// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
}
/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy, const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");
  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType()->getPointerElementType() == NewTy &&
        NewPtr->getType()->getPointerAddressSpace() == AS))
    NewPtr = IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));

  LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
      NewPtr, LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      copyNonnullMetadata(LI, N, *NewLoad);
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      copyRangeMetadata(IC.getDataLayout(), LI, N, *NewLoad);
      break;
    }
  }
  return NewLoad;
}
/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");
  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}
/// Returns true if the instruction represents a minmax pattern like:
///   select ((cmp load V1, load V2), V1, V2).
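///
/// For example (an illustrative IR sketch):
///   %l1 = load i32, i32* %p1
///   %l2 = load i32, i32* %p2
///   %c  = icmp slt i32 %l1, %l2
///   %v  = select i1 %c, i32* %p1, i32* %p2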
static bool isMinMaxWithLoads(Value *V) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}
/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number
/// of loads as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows loads to more closely
/// model the types of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic
/// load or a volatile load. This is debatable, and might be reasonable to
/// change later. However, it is risky in case some backend or other part of
/// LLVM is relying on the exact type loaded to select appropriate atomic
/// operations.
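///
/// For instance (an illustrative sketch, assuming 64-bit pointers):
///   %x = load i64, i64* %p
///   %y = inttoptr i64 %x to i8*
/// can be rewritten to load the pointer type directly:
///   %p.cast = bitcast i64* %p to i8**
///   %y = load i8*, i8** %p.cast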
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: With some care, we could probably handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  // Do not perform canonicalization if minmax pattern is found (to avoid
  // breaking the min/max idiom recognized elsewhere).
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
      !DL.isNonIntegralPointerType(Ty) &&
      !isMinMaxWithLoads(
          peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true))) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder.SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }
  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements
  // are cast to other types.
  return nullptr;
}
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: With some care, we could probably handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");
  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }
  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                             Name + ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}
// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false. Constant globals and allocas are the only objects whose sizes we can
// determine here.
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128) * APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}
// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
//   @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
//   ...
//   %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
//   ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. We could also
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx + 1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check insufficient). We can do better,
  // ignoring zero indices (and other indices we can prove small enough not to
  // wrap).
  if (Idx + 1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}
// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
                          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}
static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}
static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }
  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for these transforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable. We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }
  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
        LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1),
                                          SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 = Builder.CreateLoad(SI->getOperand(2),
                                          SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Align);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Align);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }

  return nullptr;
}
/// Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}
/// Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
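///
/// For example (an illustrative sketch):
///   %f = bitcast i32 %x to float
///   store float %f, float* %p
/// becomes
///   %p.cast = bitcast float* %p to i32*
///   store i32 %x, i32* %p.cast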
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: With some care, we could probably handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: With some care, we could probably handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break stores with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }
  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where select is minmax:
/// select ((cmp load V1, load V2), V1, V2).
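///
/// A sketch of the shape involved (illustrative IR only; names are
/// hypothetical):
///   %sel = select i1 %c, float* %p1, float* %p2   ; minmax of %p1/%p2
///   %bc1 = bitcast float* %sel to i32*
///   %v   = load i32, i32* %bc1
///   %bc2 = bitcast float* %d to i32*
///   store i32 %v, i32* %bc2
/// which this combine rewrites to a direct float load and store.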
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombiner &IC,
                                                StoreInst &SI) {
  // bitcast?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return false;
  // load? integer?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  if (!isMinMaxWithLoads(LoadAddr))
    return false;

  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               peekThroughBitcast(SI->getPointerOperand()) != LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = combineLoadToNewType(
      IC, *LI, LoadAddr->getType()->getPointerElementType());
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  IC.replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
  IC.eraseInstFromFunction(*LI);
  return true;
}
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Try to canonicalize the stored type.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }
  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }
  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the second-to-last instruction in the basic block
  // (excluding debug info and bitcasts of pointers) and if the block ends with
  // an unconditional branch, try to move the store to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));

  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      mergeStoreIntoSuccessor(SI);

  return nullptr;
}
/// Try to transform:
///   if () { *P = v1; } else { *P = v2 }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
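///
/// Illustrative result for the if/then/else case (hypothetical block names):
///   store i32 %v1, i32* %P    ; in %then
///   store i32 %v2, i32* %P    ; in %else
/// becomes, in the common successor:
///   %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///   store i32 %storemerge, i32* %P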
bool InstCombiner::mergeStoreIntoSuccessor(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "This code has not been audited for volatile or ordered store case.");

  // Check if the successor block has exactly 2 incoming edges.
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  if (*PredIter == StoreBB)
    ++PredIter;
  BasicBlock *OtherBB = *PredIter;

  // Bail out if all of the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }
  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  // The debug locations of the original instructions might differ. Merge them.
  DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
                                                     OtherStore->getDebugLoc());
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
    PN->setDebugLoc(MergedLoc);
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(), SI.getAlignment(),
                                   SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(MergedLoc);

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}