//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
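///
/// For instance (illustrative), an alloca that is written only by a single
/// @llvm.memcpy call whose source is a constant global, and is otherwise only
/// read, can later have its uses rewritten to read the global directly.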
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.push_back(std::make_pair(V, false));
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      Instruction *I = cast<Instruction>(U.getUser());

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }
      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.push_back(std::make_pair(I, IsOffset));
        continue;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.push_back(
            std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
        continue;
      }
      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }
      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }
      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}
/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is
/// only modified by a copy from a constant global.  If we can prove this, we
/// can replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}
static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder->getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
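  // For example (illustrative):
  //   %a = alloca i32, i32 4
  // becomes
  //   %a = alloca [4 x i32]
  //   %a.sub = getelementptr inbounds [4 x i32], [4 x i32]* %a, i64 0, i64 0
  // and all users of the original alloca are pointed at %a.sub instead.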
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible...also skip interleaved debug info.
    BasicBlock::iterator It(New);
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that It is pointing to the first non-allocation-inst in the block,
    // insert our getelementptr instruction.
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.replaceInstUsesWith(AI, GEP);
  }
  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together.  Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }
      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }
  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, AC, DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast
          = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = replaceInstUsesWith(AI, Cast);
        eraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}
/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
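///
/// Illustrative effect, rewriting an i32 load as a float load:
///   %v = load i32, i32* %p
/// becomes
///   %0 = bitcast i32* %p to float*
///   %v = load float, float* %0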
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy, const Twine &Suffix = "") {
  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_nonnull:
      // This only directly applies if the new type is also a pointer.
      if (NewTy->isPointerTy()) {
        NewLoad->setMetadata(ID, N);
        break;
      }
      // If it's integral now, translate it to !range metadata.
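      // The emitted range is [1, 0), i.e. the wrapping interval that excludes
      // only zero, which is the integer encoding of "not null".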
      if (NewTy->isIntegerTy()) {
        auto *ITy = cast<IntegerType>(NewTy);
        auto *NullInt = ConstantExpr::getPtrToInt(
            ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
        auto *NonNullInt =
            ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
        NewLoad->setMetadata(LLVMContext::MD_range,
                             MDB.createRange(NonNullInt, NullInt));
      }
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard.  If the new type is a pointer, we could
      // translate it to !nonnull metadata.
      break;
    }
  }
  return NewLoad;
}
/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}
/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
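///
/// For example (illustrative), on a target with 64-bit pointers:
///   %x = load i64, i64* %p
///   %y = inttoptr i64 %x to i8*
/// is rewritten to load the pointer directly:
///   %0 = bitcast i64* %p to i8**
///   %y = load i8*, i8** %0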
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
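  // For example (illustrative), a float that is loaded and then only stored
  // back to memory is re-expressed as an i32 load feeding i32 stores.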
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty)) {
    if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI;
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder->SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }
  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back())) {
      if (CI->isNoopCast(DL)) {
        LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
        CI->replaceAllUsesWith(NewLoad);
        IC.eraseInstFromFunction(*CI);
        return &LI;
      }
    }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}
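/// Unpack a load of a small aggregate (struct or array) into loads of its
/// elements that are recombined with insertvalue. Illustrative example:
///   %s = load { i32, float }, { i32, float }* %p
/// becomes per-element loads through element GEPs ("%s.elt"/"%s.unpack"
/// values) folded back into an aggregate that replaces %s.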
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");
  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }
  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > 1024)
      return nullptr;
    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                              Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}
// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true (constant global
// values and allocas fall into this category). Otherwise, return false.
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }
    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as an
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}
// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };
  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
    GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);
  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      bool KnownNonNegative, KnownNegative;
      IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
                        KnownNegative, 0, MemI);
      if (KnownNonNegative)
        continue;
      return false;
    }

    return true;
  };
  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}
// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
        ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, AC, DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);
  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;
  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  AAMDNodes AATags;
  bool IsLoadCSE = false;
  if (Value *AvailableVal =
          FindAvailableLoadedValue(&LI, LI.getParent(), BBI,
                                   DefMaxInstsToScan, AA, &AATags, &IsLoadCSE)) {
    if (IsLoadCSE) {
      LoadInst *NLI = cast<LoadInst>(AvailableVal);
      unsigned KnownIDs[] = {
          LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
          LLVMContext::MD_noalias,         LLVMContext::MD_range,
          LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
          LLVMContext::MD_invariant_group, LLVMContext::MD_align,
          LLVMContext::MD_dereferenceable,
          LLVMContext::MD_dereferenceable_or_null};
      combineMetadata(NLI, &LI, KnownIDs);
    }

    return replaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));
  }
  // None of the following transforms are legal for volatile/ordered atomic
  // loads.  Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }
  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Align);
        V1->setAtomic(LI.getOrdering(), LI.getSynchScope());
        V2->setAlignment(Align);
        V2->setAtomic(LI.getOrdering(), LI.getSynchScope());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }
      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }

  return nullptr;
}
/// \brief Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;
  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}
/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
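///
/// For example (illustrative):
///   %f = bitcast i32 %x to float
///   store float %f, float* %p
/// becomes
///   %0 = bitcast float* %p to i32*
///   store i32 %x, i32* %0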
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    combineStoreToNewValue(IC, SI, V);
    return true;
  }

  if (Value *U = likeBitCastFromVector(IC, V)) {
    combineStoreToNewValue(IC, SI, U);
    return true;
  }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}
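/// Unpack a store of a small aggregate (struct or array) into stores of its
/// elements. Illustrative example:
///   store { i32, i32 } %agg, { i32, i32 }* %p
/// becomes an extractvalue of each element plus a scalar store through an
/// element GEP (the "%p.repack" values).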
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
    // We don't want to break stores with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                AddrName);
      auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
    }

    return true;
  }
  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > 1024)
      return false;
    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                                AddrName);
      auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value.  This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, AC, DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);
  // Try to canonicalize the stored type.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;
  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }
  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
                                                           SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }
    // If this is a load, we have to stop.  However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }
  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);
  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}
/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
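///
/// Illustrative result in the successor block (for the if/then/else case):
///   %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///   store i32 %storemerge, i32* %P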
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "this code has not been audited for volatile or ordered store case");

  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;
  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;
  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case; there is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }
    // In order to eliminate the store in OtherBr, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }
  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());
  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}