//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset), but otherwise
/// traverse the uses. If we see a memcpy/memmove that targets an unoffsetted
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
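///
/// For example (an illustrative IR sketch; names are hypothetical):
///   %a = alloca [4 x i32]
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a.i8, i8* @cst.i8,
///                                        i64 16, i1 false)
///   ; ... only reads of %a follow ...
/// Here every use of %a can be rewritten to use @cst directly.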
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If
        // it doesn't, it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }
      if (auto *Call = dyn_cast<CallBase>(I)) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (Call->isCallee(&U))
          continue;

        unsigned DataOpNo = Call->getDataOperandNo(&U);
        bool IsArgOperand = Call->isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (Call->onlyReadsMemory() &&
            (Call->use_empty() || Call->doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && Call->isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(I);
        continue;
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is
/// only modified by a copy from a constant global. If we can prove this, we
/// can replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

/// Returns true if V is dereferenceable for the size of the alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, Align(AI->getAlignment()),
                                            APInt(64, AllocaSize), DL);
}
static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder.getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
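  // For example (an illustrative IR sketch, with i64 as the index type):
  //   %a = alloca i32, i32 4
  // becomes
  //   %a = alloca [4 x i32]
  //   %a.sub = getelementptr inbounds [4 x i32], [4 x i32]* %a, i64 0, i64 0
  // and all uses of %a are rewritten to use %a.sub.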
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    if (C->getValue().getActiveBits() <= 64) {
      Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(MaybeAlign(AI.getAlignment()));

      // Scan to the end of the allocation instructions, to skip over a block
      // of allocas if possible...also skip interleaved debug info.
      BasicBlock::iterator It(New);
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
        ++It;

      // Now that It is pointing to the first non-allocation-inst in the
      // block, insert our getelementptr instruction...
      Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = {NullIdx, NullIdx};
      Instruction *GEP = GetElementPtrInst::CreateInBounds(
          NewTy, New, Idx, New->getName() + ".sub");
      IC.InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return IC.replaceInstUsesWith(AI, GEP);
    }
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

namespace {
// If I and V are pointers in different address space, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees bitcast or GEP, it will
// create new bitcast or GEP with the new pointer and use them in the load
// instruction.
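//
// For example (an illustrative sketch, assuming the copied-from global lives
// in addrspace(1) while the alloca lives in addrspace(0)):
//   %bc = bitcast i32* %alloca to float*
//   %v  = load float, float* %bc
// becomes, after replacing %alloca with @g:
//   %bc.new = bitcast i32 addrspace(1)* @g to float addrspace(1)*
//   %v      = load float, float addrspace(1)* %bc.new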
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace

void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    LLVM_DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}

Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(I->getType(), V);
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(
          MaybeAlign(DL.getPrefTypeAlignment(AI.getAllocatedType())));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together. Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array
      // allocation. This is helpful if the array size is a complicated
      // expression not used elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier
        // already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              MaybeAlign(DL.getPrefTypeAlignment(EntryAI->getAllocatedType())));
        // Replace this zero-sized alloca with the one at the start of the
        // entry block after ensuring that the address will be aligned enough
        // for both types.
        const MaybeAlign MaxAlign(
            std::max(EntryAI->getAlignment(), AI.getAlignment()));
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global whose alignment is equal to or exceeds that of
    // the allocation. If this is the case, we can change all users to use the
    // constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if
    // 'A' is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        LLVM_DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        }

        PointerReplacer PtrReplacer(*this);
        PtrReplacer.replacePointer(AI, Cast);
        ++NumGlobalCopies;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // dead allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
}

/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
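///
/// For example (an illustrative sketch; names are hypothetical), rewriting
///   %v = load i32, i32* %p
/// to load a float instead produces
///   %p.cast = bitcast i32* %p to float*
///   %v.cast = load float, float* %p.cast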
LoadInst *InstCombiner::combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                             const Twine &Suffix) {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType()->getPointerElementType() == NewTy &&
        NewPtr->getType()->getPointerAddressSpace() == AS))
    NewPtr = Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));

  unsigned Align = LI.getAlignment();
  if (!Align)
    // If the old load did not have an explicit alignment specified,
    // manually preserve the implied (ABI) alignment of the load.
    // Else we may inadvertently incorrectly over-promise alignment.
    Align = getDataLayout().getABITypeAlignment(LI.getType());

  LoadInst *NewLoad = Builder.CreateAlignedLoad(
      NewTy, NewPtr, Align, LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  copyMetadataForLoad(*NewLoad, LI);
  return NewLoad;
}

/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// Returns true if the instruction represents a minmax pattern like:
///   select ((cmp load V1, load V2), V1, V2).
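///
/// For example (an illustrative sketch, where the select picks the *address*
/// of the smaller value):
///   %l1 = load i32, i32* %p1
///   %l2 = load i32, i32* %p2
///   %c  = icmp slt i32 %l1, %l2
///   %pm = select i1 %c, i32* %p1, i32* %p2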
static bool isMinMaxWithLoads(Value *V, Type *&LoadTy) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  LoadTy = L1->getType();
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}

/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number
/// of loads as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows loads to more closely
/// model the types of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic
/// load or a volatile load. This is debatable, and might be reasonable to
/// change later. However, it is risky in case some backend or other part of
/// LLVM is relying on the exact type loaded to select appropriate atomic
/// operations.
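///
/// For example (an illustrative sketch of the integer-to-pointer case the
/// comment above describes; on a target with 64-bit pointers):
///   %i = load i64, i64* %p
///   %q = inttoptr i64 %i to i8*
/// is better modeled as
///   %p.cast = bitcast i64* %p to i8**
///   %q      = load i8*, i8** %p.cast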
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  // Do not perform canonicalization if minmax pattern is found (to avoid
  // undoing the min/max canonical form, which is needed by other combines).
  Type *Dummy;
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      !(Ty->isVectorTy() && Ty->getVectorIsScalable()) &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.typeSizeEqualsStoreSize(Ty) &&
      !DL.isNonIntegralPointerType(Ty) &&
      !isMinMaxWithLoads(
          peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true),
          Dummy)) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = IC.combineLoadToNewType(
          LI, Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder.SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer
  // types, as long as those are noops (i.e., the source or dest type has the
  // same bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = IC.combineLoadToNewType(LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements
  // are cast to other types.
  return nullptr;
}
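// For example (an illustrative sketch of what unpackLoadToAggregate below
// produces; names are hypothetical), a load of a two-field struct
//   %agg = load { i32, float }, { i32, float }* %p
// is unpacked into per-element loads reassembled with insertvalue:
//   %e0 = getelementptr inbounds { i32, float }, { i32, float }* %p,
//                       i32 0, i32 0
//   %v0 = load i32, i32* %e0
//   %e1 = getelementptr inbounds { i32, float }, { i32, float }* %p,
//                       i32 0, i32 1
//   %v1 = load float, float* %e1
//   %a0 = insertvalue { i32, float } undef, i32 %v0, 0
//   %a1 = insertvalue { i32, float } %a0, float %v1, 1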
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ST->getTypeAtIndex(0U),
                                                  ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
                                             EltAlign, Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(
          AT->getElementType(), Ptr, MinAlign(Align, Offset), Name + ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}
// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (constant globals and allocas of known size are the cases for which
// we can currently prove this).
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
        ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(MaybeAlign(KnownAlign));
  else if (LoadAlign == 0)
    LI.setAlignment(MaybeAlign(EffectiveLoadAlign));

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable. We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      const MaybeAlign Alignment(LI.getAlignment());
      if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                      Alignment, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
                                      Alignment, DL, SI)) {
        LoadInst *V1 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                               SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(2),
                               SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Alignment);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Alignment);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}
/// Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value
/// being stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.EraseInstFromFunction returns a null pointer.
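///
/// For example (an illustrative sketch; names are hypothetical):
///   %i = bitcast float %f to i32
///   store i32 %i, i32* %p
/// becomes
///   %p.cast = bitcast i32* %p to float*
///   store float %f, float* %p.cast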
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where select is minmax:
/// select ((cmp load V1, load V2), V1, V2).
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombiner &IC,
                                                StoreInst &SI) {
  // bitcast?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return false;
  // load? integer?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  Type *CmpLoadTy;
  if (!isMinMaxWithLoads(LoadAddr, CmpLoadTy))
    return false;

  // Make sure we're not changing the size of the load/store.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(LI->getType()) !=
      DL.getTypeStoreSizeInBits(CmpLoadTy))
    return false;

  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               peekThroughBitcast(SI->getPointerOperand()) != LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = IC.combineLoadToNewType(*LI, CmpLoadTy);
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  IC.replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
  IC.eraseInstFromFunction(*LI);
  return true;
}
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  const Align KnownAlign = Align(getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT));
  const MaybeAlign StoreAlign = MaybeAlign(SI.getAlignment());
  const Align EffectiveStoreAlign =
      StoreAlign ? *StoreAlign : Align(DL.getABITypeAlignment(Val->getType()));

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (!StoreAlign)
    SI.setAlignment(EffectiveStoreAlign);

  // Try to canonicalize the stored type.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(Ptr))
    return eraseInstFromFunction(SI);

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
        ++NumDeadStore;
        // Manually add back the original store to the worklist now, so it will
        // be processed after the operands of the removed store, as this may
        // expose additional DSE opportunities.
        Worklist.Add(&SI);
        eraseInstFromFunction(*PrevSI);
        return nullptr;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U); // Dropped a use.
    }
    return nullptr; // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the second-to-last instruction in the basic block
  // (excluding debug info and bitcasts of pointers) and if the block ends with
  // an unconditional branch, try to move the store to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));

  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      mergeStoreIntoSuccessor(SI);

  return nullptr;
}
/// Try to transform:
///   if () { *P = v1; } else { *P = v2 }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
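///
/// For example (an illustrative sketch; block names are hypothetical), two
/// stores feeding a common successor
///   store i32 %v1, i32* %P   ; in StoreBB
///   store i32 %v2, i32* %P   ; in OtherBB
/// become, in DestBB:
///   %storemerge = phi i32 [ %v1, %StoreBB ], [ %v2, %OtherBB ]
///   store i32 %storemerge, i32* %P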
bool InstCombiner::mergeStoreIntoSuccessor(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "This code has not been audited for volatile or ordered store case.");

  // Check if the successor block has exactly 2 incoming edges.
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  if (*PredIter == StoreBB)
    ++PredIter;
  BasicBlock *OtherBB = *PredIter;

  // Bail out if all of the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI==OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  // The debug locations of the original instructions might differ. Merge them.
  DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
                                                     OtherStore->getDebugLoc());
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
    PN->setDebugLoc(MergedLoc);
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(),
                                   MaybeAlign(SI.getAlignment()),
                                   SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(MergedLoc);

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}