//===- InstCombinePHI.cpp ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitPHINode function.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"

/// The PHI arguments will be folded into a single operation with a PHI node
/// as input. The debug location of the single operation will be the merged
/// locations of the original PHI node arguments.
void InstCombiner::PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN) {
  auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  Inst->setDebugLoc(FirstInst->getDebugLoc());
  // We do not expect a CallInst here; otherwise, N-way merging of DebugLocs
  // would be inefficient.
  assert(!isa<CallInst>(Inst));

  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    auto *I = cast<Instruction>(PN.getIncomingValue(i));
    Inst->applyMergedLocation(Inst->getDebugLoc(), I->getDebugLoc());
  }
}

// Replace an integer-typed PHI PN if the PHI's value is used as a pointer
// value. If there is an existing pointer-typed PHI that produces the same
// value as PN, replace PN and the IntToPtr operation with it. Otherwise,
// synthesize a new pointer-typed PHI and replace the original one.
//
// Case-1:
// bb1:
//    int_init = PtrToInt(ptr_init)
//    br label %bb2
// bb2:
//    int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ptr_val2 = IntToPtr(int_val)
//    ...
//    use(ptr_val2)
//    ptr_val_inc = ...
//    int_val_inc = PtrToInt(ptr_val_inc)
//
// ==>
// bb1:
//    br label %bb2
// bb2:
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ptr_val2 = ptr_val
//    ...
//    use(ptr_val2)
//    ptr_val_inc = ...
//
// Case-2:
// bb1:
//    int_ptr = BitCast(ptr_ptr)
//    int_init = Load(int_ptr)
//    br label %bb2
// bb2:
//    int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
//    ptr_val2 = IntToPtr(int_val)
//    ...
//    use(ptr_val2)
//    ptr_val_inc = ...
//    int_val_inc = PtrToInt(ptr_val_inc)
// ==>
// bb1:
//    ptr_init = Load(ptr_ptr)
//    br label %bb2
// bb2:
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ptr_val2 = ptr_val
//    ...
//    use(ptr_val2)
//    ptr_val_inc = ...
//    int_val_inc = PtrToInt(ptr_val_inc)
//
Instruction *InstCombiner::FoldIntegerTypedPHI(PHINode &PN) {
  if (!PN.getType()->isIntegerTy())
    return nullptr;
  if (!PN.hasOneUse())
    return nullptr;

  auto *IntToPtr = dyn_cast<IntToPtrInst>(PN.user_back());
  if (!IntToPtr)
    return nullptr;

  // Check if the pointer is actually used as a pointer:
  auto HasPointerUse = [](Instruction *IIP) {
    for (User *U : IIP->users()) {
      Value *Ptr = nullptr;
      if (LoadInst *LoadI = dyn_cast<LoadInst>(U)) {
        Ptr = LoadI->getPointerOperand();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        Ptr = SI->getPointerOperand();
      } else if (GetElementPtrInst *GI = dyn_cast<GetElementPtrInst>(U)) {
        Ptr = GI->getPointerOperand();
      }

      if (Ptr && Ptr == IIP)
        return true;
    }
    return false;
  };

  if (!HasPointerUse(IntToPtr))
    return nullptr;

  if (DL.getPointerSizeInBits(IntToPtr->getAddressSpace()) !=
      DL.getTypeSizeInBits(IntToPtr->getOperand(0)->getType()))
    return nullptr;

  SmallVector<Value *, 4> AvailablePtrVals;
  for (unsigned i = 0; i != PN.getNumIncomingValues(); ++i) {
    Value *Arg = PN.getIncomingValue(i);

    // First look backward:
    if (auto *PI = dyn_cast<PtrToIntInst>(Arg)) {
      AvailablePtrVals.emplace_back(PI->getOperand(0));
      continue;
    }

    // Next look forward:
    Value *ArgIntToPtr = nullptr;
    for (User *U : Arg->users()) {
      if (isa<IntToPtrInst>(U) && U->getType() == IntToPtr->getType() &&
          (DT.dominates(cast<Instruction>(U), PN.getIncomingBlock(i)) ||
           cast<Instruction>(U)->getParent() == PN.getIncomingBlock(i))) {
        ArgIntToPtr = U;
        break;
      }
    }

    if (ArgIntToPtr) {
      AvailablePtrVals.emplace_back(ArgIntToPtr);
      continue;
    }

    // If Arg is defined by a PHI, allow it. This will also create
    // more opportunities iteratively.
    if (isa<PHINode>(Arg)) {
      AvailablePtrVals.emplace_back(Arg);
      continue;
    }

    // For a single-use integer load:
    auto *LoadI = dyn_cast<LoadInst>(Arg);
    if (!LoadI)
      return nullptr;

    if (!LoadI->hasOneUse())
      return nullptr;

    // Push the integer-typed Load instruction into the available
    // value set, and fix it up later when the pointer-typed PHI
    // is synthesized.
    AvailablePtrVals.emplace_back(LoadI);
  }

  // Now search for a matching PHI.
  auto *BB = PN.getParent();
  assert(AvailablePtrVals.size() == PN.getNumIncomingValues() &&
         "Not enough available ptr typed incoming values");
  PHINode *MatchingPtrPHI = nullptr;
  for (auto II = BB->begin(), EI = BasicBlock::iterator(BB->getFirstNonPHI());
       II != EI; ++II) {
    PHINode *PtrPHI = dyn_cast<PHINode>(II);
    if (!PtrPHI || PtrPHI == &PN || PtrPHI->getType() != IntToPtr->getType())
      continue;
    MatchingPtrPHI = PtrPHI;
    for (unsigned i = 0; i != PtrPHI->getNumIncomingValues(); ++i) {
      if (AvailablePtrVals[i] !=
          PtrPHI->getIncomingValueForBlock(PN.getIncomingBlock(i))) {
        MatchingPtrPHI = nullptr;
        break;
      }
    }

    if (MatchingPtrPHI)
      break;
  }

  if (MatchingPtrPHI) {
    assert(MatchingPtrPHI->getType() == IntToPtr->getType() &&
           "Phi's Type does not match with IntToPtr");
    // The PtrToInt + IntToPtr pair will be simplified later.
    return CastInst::CreateBitOrPointerCast(MatchingPtrPHI,
                                            IntToPtr->getOperand(0)->getType());
  }

  // If it requires a conversion for every PHI operand, do not do it.
  if (std::all_of(AvailablePtrVals.begin(), AvailablePtrVals.end(),
                  [&](Value *V) {
                    return (V->getType() != IntToPtr->getType()) ||
                           isa<IntToPtrInst>(V);
                  }))
    return nullptr;

  // If any of the operands that require casting is a terminator
  // instruction, do not do it.
  if (std::any_of(AvailablePtrVals.begin(), AvailablePtrVals.end(),
                  [&](Value *V) {
                    return (V->getType() != IntToPtr->getType()) &&
                           isa<TerminatorInst>(V);
                  }))
    return nullptr;

  PHINode *NewPtrPHI = PHINode::Create(
      IntToPtr->getType(), PN.getNumIncomingValues(), PN.getName() + ".ptr");

  InsertNewInstBefore(NewPtrPHI, PN);
  SmallDenseMap<Value *, Instruction *> Casts;
  for (unsigned i = 0; i != PN.getNumIncomingValues(); ++i) {
    auto *IncomingBB = PN.getIncomingBlock(i);
    auto *IncomingVal = AvailablePtrVals[i];

    if (IncomingVal->getType() == IntToPtr->getType()) {
      NewPtrPHI->addIncoming(IncomingVal, IncomingBB);
      continue;
    }

#ifndef NDEBUG
    LoadInst *LoadI = dyn_cast<LoadInst>(IncomingVal);
    assert((isa<PHINode>(IncomingVal) ||
            IncomingVal->getType()->isPointerTy() ||
            (LoadI && LoadI->hasOneUse())) &&
           "Can not replace LoadInst with multiple uses");
#endif
    // Need to insert a BitCast.
    // For an integer Load instruction with a single use, the load + IntToPtr
    // cast will be simplified into a pointer load:
    //   %v = load i64, i64* %a.ip, align 8
    //   %v.cast = inttoptr i64 %v to float **
    // ==>
    //   %v.ptrp = bitcast i64 * %a.ip to float **
    //   %v.cast = load float *, float ** %v.ptrp, align 8
    Instruction *&CI = Casts[IncomingVal];
    if (!CI) {
      CI = CastInst::CreateBitOrPointerCast(IncomingVal, IntToPtr->getType(),
                                            IncomingVal->getName() + ".ptr");
      if (auto *IncomingI = dyn_cast<Instruction>(IncomingVal)) {
        BasicBlock::iterator InsertPos(IncomingI);
        InsertPos++;
        if (isa<PHINode>(IncomingI))
          InsertPos = IncomingI->getParent()->getFirstInsertionPt();
        InsertNewInstBefore(CI, *InsertPos);
      } else {
        auto *InsertBB = &IncomingBB->getParent()->getEntryBlock();
        InsertNewInstBefore(CI, *InsertBB->getFirstInsertionPt());
      }
    }
    NewPtrPHI->addIncoming(CI, IncomingBB);
  }

  // The PtrToInt + IntToPtr pair will be simplified later.
  return CastInst::CreateBitOrPointerCast(NewPtrPHI,
                                          IntToPtr->getOperand(0)->getType());
}

/// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
/// adds all have a single use, turn this into a phi and a single binop.
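///
/// For example (illustrative IR, added for exposition; not from the original
/// source):
///   %x = add i32 %a, %b                          ; in %bb1, single use
///   %y = add i32 %a, %c                          ; in %bb2, single use
///   %p = phi i32 [ %x, %bb1 ], [ %y, %bb2 ]
/// becomes
///   %b.pn = phi i32 [ %b, %bb1 ], [ %c, %bb2 ]
///   %p = add i32 %a, %b.pn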
Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
  unsigned Opc = FirstInst->getOpcode();
  Value *LHSVal = FirstInst->getOperand(0);
  Value *RHSVal = FirstInst->getOperand(1);

  Type *LHSType = LHSVal->getType();
  Type *RHSType = RHSVal->getType();

  // Scan to see if all operands are the same opcode, and all have one use.
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
        // Verify type of the LHS matches so we don't fold cmps of different
        // types.
        I->getOperand(0)->getType() != LHSType ||
        I->getOperand(1)->getType() != RHSType)
      return nullptr;

    // If they are CmpInst instructions, check their predicates.
    if (CmpInst *CI = dyn_cast<CmpInst>(I))
      if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
        return nullptr;

    // Keep track of which operand needs a phi node.
    if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
    if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
  }

  // If both LHS and RHS would need a PHI, don't do this transformation,
  // because it would increase the number of PHIs entering the block,
  // which leads to higher register pressure. This is especially
  // bad when the PHIs are in the header of a loop.
  if (!LHSVal && !RHSVal)
    return nullptr;

  // Otherwise, this is safe to transform!

  Value *InLHS = FirstInst->getOperand(0);
  Value *InRHS = FirstInst->getOperand(1);
  PHINode *NewLHS = nullptr, *NewRHS = nullptr;
  if (!LHSVal) {
    NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(0)->getName() + ".pn");
    NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewLHS, PN);
    LHSVal = NewLHS;
  }

  if (!RHSVal) {
    NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(1)->getName() + ".pn");
    NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewRHS, PN);
    RHSVal = NewRHS;
  }

  // Add all operands to the new PHIs.
  if (NewLHS || NewRHS) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
      if (NewLHS) {
        Value *NewInLHS = InInst->getOperand(0);
        NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
      }
      if (NewRHS) {
        Value *NewInRHS = InInst->getOperand(1);
        NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
      }
    }
  }

  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
    CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                     LHSVal, RHSVal);
    PHIArgMergedDebugLoc(NewCI, PN);
    return NewCI;
  }

  BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
  BinaryOperator *NewBinOp =
      BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);

  NewBinOp->copyIRFlags(PN.getIncomingValue(0));

  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i)
    NewBinOp->andIRFlags(PN.getIncomingValue(i));

  PHIArgMergedDebugLoc(NewBinOp, PN);
  return NewBinOp;
}
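
/// Fold a PHI of GEPs into a single GEP of PHI'd operands. Illustrative
/// example (added for exposition, not from the original source):
///   %g1 = getelementptr i32, i32* %base, i64 %i  ; in %bb1, single use
///   %g2 = getelementptr i32, i32* %base, i64 %j  ; in %bb2, single use
///   %p = phi i32* [ %g1, %bb1 ], [ %g2, %bb2 ]
/// becomes
///   %i.pn = phi i64 [ %i, %bb1 ], [ %j, %bb2 ]
///   %p = getelementptr i32, i32* %base, i64 %i.pn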
Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
  GetElementPtrInst *FirstInst =
      cast<GetElementPtrInst>(PN.getIncomingValue(0));

  SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
                                        FirstInst->op_end());
  // This is true if all GEP bases are allocas and if all indices into them are
  // constants.
  bool AllBasePointersAreAllocas = true;

  // We don't want to replace this phi if the replacement would require
  // more than one phi, which leads to higher register pressure. This is
  // especially bad when the PHIs are in the header of a loop.
  bool NeededPhi = false;

  bool AllInBounds = true;

  // Scan to see if all operands are the same opcode, and all have one use.
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    GetElementPtrInst *GEP =
        dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
    if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
        GEP->getNumOperands() != FirstInst->getNumOperands())
      return nullptr;

    AllInBounds &= GEP->isInBounds();

    // Keep track of whether or not all GEPs are of alloca pointers.
    if (AllBasePointersAreAllocas &&
        (!isa<AllocaInst>(GEP->getOperand(0)) ||
         !GEP->hasAllConstantIndices()))
      AllBasePointersAreAllocas = false;

    // Compare the operand lists.
    for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
      if (FirstInst->getOperand(op) == GEP->getOperand(op))
        continue;

      // Don't merge two GEPs when two operands differ (introducing phi nodes)
      // if one of the PHIs has a constant for the index. The index may be
      // substantially cheaper to compute for the constants, so making it a
      // variable index could pessimize the path. This also handles the case
      // for struct indices, which must always be constant.
      if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
          isa<ConstantInt>(GEP->getOperand(op)))
        return nullptr;

      if (FirstInst->getOperand(op)->getType() !=
          GEP->getOperand(op)->getType())
        return nullptr;

      // If we already needed a PHI for an earlier operand, and another operand
      // also requires a PHI, we'd be introducing more PHIs than we're
      // eliminating, which increases register pressure on entry to the PHI's
      // block.
      if (NeededPhi)
        return nullptr;

      FixedOperands[op] = nullptr; // Needs a PHI.
      NeededPhi = true;
    }
  }

  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
  // bother doing this transformation. At best, this will just save a bit of
  // offset calculation, but all the predecessors will have to materialize the
  // stack address into a register anyway. We'd actually rather *clone* the
  // load up into the predecessors so that we have a load of a gep of an alloca,
  // which can usually all be folded into the load.
  if (AllBasePointersAreAllocas)
    return nullptr;

  // Otherwise, this is safe to transform. Insert PHI nodes for each operand
  // that is variable.
  SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());

  bool HasAnyPHIs = false;
  for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
    if (FixedOperands[i]) continue; // operand doesn't need a phi.
    Value *FirstOp = FirstInst->getOperand(i);
    PHINode *NewPN = PHINode::Create(FirstOp->getType(), e,
                                     FirstOp->getName() + ".pn");
    InsertNewInstBefore(NewPN, PN);

    NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
    OperandPhis[i] = NewPN;
    FixedOperands[i] = NewPN;
    HasAnyPHIs = true;
  }

  // Add all operands to the new PHIs.
  if (HasAnyPHIs) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      GetElementPtrInst *InGEP =
          cast<GetElementPtrInst>(PN.getIncomingValue(i));
      BasicBlock *InBB = PN.getIncomingBlock(i);

      for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
        if (PHINode *OpPhi = OperandPhis[op])
          OpPhi->addIncoming(InGEP->getOperand(op), InBB);
    }
  }

  Value *Base = FixedOperands[0];
  GetElementPtrInst *NewGEP =
      GetElementPtrInst::Create(FirstInst->getSourceElementType(), Base,
                                makeArrayRef(FixedOperands).slice(1));
  if (AllInBounds) NewGEP->setIsInBounds();
  PHIArgMergedDebugLoc(NewGEP, PN);
  return NewGEP;
}

/// Return true if we know that it is safe to sink the load out of the block
/// that defines it. This means that it must be obvious the value of the load
/// is not changed from the point of the load to the end of the block it is in.
///
/// Finally, it is safe, but not profitable, to sink a load targeting a
/// non-address-taken alloca. Doing so will cause us to not promote the alloca
/// to a register.
static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
  BasicBlock::iterator BBI = L->getIterator(), E = L->getParent()->end();

  for (++BBI; BBI != E; ++BBI)
    if (BBI->mayWriteToMemory())
      return false;

  // Check for a non-address-taken alloca. If the alloca is not already
  // address-taken, this transform isn't profitable.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
    bool isAddressTaken = false;
    for (User *U : AI->users()) {
      if (isa<LoadInst>(U)) continue;
      if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        // If storing TO the alloca, then the address isn't taken.
        if (SI->getOperand(1) == AI) continue;
      }
      isAddressTaken = true;
      break;
    }

    if (!isAddressTaken && AI->isStaticAlloca())
      return false;
  }

  // If this load is a load from a GEP with a constant offset from an alloca,
  // then we don't want to sink it. In its present form, it will be
  // load [constant stack offset]. Sinking it will cause us to have to
  // materialize the stack addresses in each predecessor in a register only to
  // do a shared load from register in the successor.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
    if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
      if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
        return false;

  return true;
}
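
/// Fold a PHI of loads into a single load of a PHI'd pointer, sinking the
/// load below the PHI. Illustrative example (added for exposition, not from
/// the original source):
///   %v1 = load i32, i32* %p1        ; in %pred1, no later may-write
///   %v2 = load i32, i32* %p2        ; in %pred2, no later may-write
///   %r = phi i32 [ %v1, %pred1 ], [ %v2, %pred2 ]
/// becomes
///   %r.in = phi i32* [ %p1, %pred1 ], [ %p2, %pred2 ]
///   %r = load i32, i32* %r.in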
Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));

  // FIXME: This is overconservative; this transform is allowed in some cases
  // for atomic operations.
  if (FirstLI->isAtomic())
    return nullptr;

  // When processing loads, we need to propagate two bits of information to the
  // sunk load: whether it is volatile, and what its alignment is. We currently
  // don't sink loads when some have their alignment specified and some don't.
  // visitLoadInst will propagate an alignment onto the load when TD is around,
  // and if TD isn't around, we can't handle the mixed case.
  bool isVolatile = FirstLI->isVolatile();
  unsigned LoadAlignment = FirstLI->getAlignment();
  unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();

  // We can't sink the load if the loaded value could be modified between the
  // load and the PHI.
  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
      !isSafeAndProfitableToSinkLoad(FirstLI))
    return nullptr;

  // If the PHI is of volatile loads and the load block has multiple
  // successors, sinking it would remove a load of the volatile value from
  // the path through the other successor.
  if (isVolatile &&
      FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
    return nullptr;

  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
    if (!LI || !LI->hasOneUse())
      return nullptr;

    // We can't sink the load if the loaded value could be modified between
    // the load and the PHI.
    if (LI->isVolatile() != isVolatile ||
        LI->getParent() != PN.getIncomingBlock(i) ||
        LI->getPointerAddressSpace() != LoadAddrSpace ||
        !isSafeAndProfitableToSinkLoad(LI))
      return nullptr;

    // If some of the loads have an alignment specified but not all of them,
    // we can't do the transformation.
    if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
      return nullptr;

    LoadAlignment = std::min(LoadAlignment, LI->getAlignment());

    // If the PHI is of volatile loads and the load block has multiple
    // successors, sinking it would remove a load of the volatile value from
    // the path through the other successor.
    if (isVolatile &&
        LI->getParent()->getTerminator()->getNumSuccessors() != 1)
      return nullptr;
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName() + ".in");

  Value *InVal = FirstLI->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
  LoadInst *NewLI = new LoadInst(NewPN, "", isVolatile, LoadAlignment);

  unsigned KnownIDs[] = {
    LLVMContext::MD_tbaa,
    LLVMContext::MD_range,
    LLVMContext::MD_invariant_load,
    LLVMContext::MD_alias_scope,
    LLVMContext::MD_noalias,
    LLVMContext::MD_nonnull,
    LLVMContext::MD_align,
    LLVMContext::MD_dereferenceable,
    LLVMContext::MD_dereferenceable_or_null,
  };

  for (unsigned ID : KnownIDs)
    NewLI->setMetadata(ID, FirstLI->getMetadata(ID));

  // Add all operands to the new PHI and combine TBAA metadata.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    LoadInst *LI = cast<LoadInst>(PN.getIncomingValue(i));
    combineMetadata(NewLI, LI, KnownIDs);
    Value *NewInVal = LI->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    NewLI->setOperand(0, InVal);
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
  }

  // If this was a volatile load that we are merging, make sure to loop through
  // and mark all the input loads as non-volatile. If we don't do this, we will
  // insert a new volatile load and the old ones will not be deletable.
  if (isVolatile)
    for (Value *IncValue : PN.incoming_values())
      cast<LoadInst>(IncValue)->setVolatile(false);

  PHIArgMergedDebugLoc(NewLI, PN);
  return NewLI;
}

/// TODO: This function could handle other cast types, but then it might
/// require special-casing a cast from the 'i1' type. See the comment in
/// FoldPHIArgOpIntoPHI() about pessimizing illegal integer types.
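///
/// Illustrative example (added for exposition, not from the original source):
///   %x = zext i8 %a to i32          ; single use
///   %y = zext i8 %b to i32          ; single use
///   %p = phi i32 [ %x, %bb1 ], [ %y, %bb2 ], [ 7, %bb3 ]
/// becomes
///   %p.shrunk = phi i8 [ %a, %bb1 ], [ %b, %bb2 ], [ 7, %bb3 ]
///   %p = zext i8 %p.shrunk to i32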
Instruction *InstCombiner::FoldPHIArgZextsIntoPHI(PHINode &Phi) {
  // We cannot create a new instruction after the PHI if the terminator is an
  // EHPad because there is no valid insertion point.
  if (TerminatorInst *TI = Phi.getParent()->getTerminator())
    if (TI->isEHPad())
      return nullptr;

  // Early exit for the common case of a phi with two operands. These are
  // handled elsewhere. See the comment below where we check the count of zexts
  // and constants for more details.
  unsigned NumIncomingValues = Phi.getNumIncomingValues();
  if (NumIncomingValues < 3)
    return nullptr;

  // Find the narrower type specified by the first zext.
  Type *NarrowType = nullptr;
  for (Value *V : Phi.incoming_values()) {
    if (auto *Zext = dyn_cast<ZExtInst>(V)) {
      NarrowType = Zext->getSrcTy();
      break;
    }
  }
  if (!NarrowType)
    return nullptr;

  // Walk the phi operands checking that we only have zexts or constants that
  // we can shrink for free. Store the new operands for the new phi.
  SmallVector<Value *, 4> NewIncoming;
  unsigned NumZexts = 0;
  unsigned NumConsts = 0;
  for (Value *V : Phi.incoming_values()) {
    if (auto *Zext = dyn_cast<ZExtInst>(V)) {
      // All zexts must be identical and have one use.
      if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUse())
        return nullptr;
      NewIncoming.push_back(Zext->getOperand(0));
      NumZexts++;
    } else if (auto *C = dyn_cast<Constant>(V)) {
      // Make sure that constants can fit in the new type.
      Constant *Trunc = ConstantExpr::getTrunc(C, NarrowType);
      if (ConstantExpr::getZExt(Trunc, C->getType()) != C)
        return nullptr;
      NewIncoming.push_back(Trunc);
      NumConsts++;
    } else {
      // If it's not a cast or a constant, bail out.
      return nullptr;
    }
  }

  // The more common cases of a phi with no constant operands or just one
  // variable operand are handled by FoldPHIArgOpIntoPHI() and foldOpIntoPhi()
  // respectively. foldOpIntoPhi() wants to do the opposite transform that is
  // performed here. It tries to replicate a cast in the phi operand's basic
  // block to expose other folding opportunities. Thus, InstCombine will
  // infinite loop without this check.
  if (NumConsts == 0 || NumZexts < 2)
    return nullptr;

  // All incoming values are zexts or constants that are safe to truncate.
  // Create a new phi node of the narrow type, phi together all of the new
  // operands, and zext the result back to the original type.
  PHINode *NewPhi = PHINode::Create(NarrowType, NumIncomingValues,
                                    Phi.getName() + ".shrunk");
  for (unsigned i = 0; i != NumIncomingValues; ++i)
    NewPhi->addIncoming(NewIncoming[i], Phi.getIncomingBlock(i));

  InsertNewInstBefore(NewPhi, Phi);
  return CastInst::CreateZExtOrBitCast(NewPhi, Phi.getType());
}

/// If all operands to a PHI node are the same "unary" operator and they all
/// are only used by the PHI, PHI together their inputs, and do the operation
/// once, to the result of the PHI.
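///
/// Illustrative example (added for exposition, not from the original source):
///   %x = trunc i64 %a to i32        ; in %bb1, single use
///   %y = trunc i64 %b to i32        ; in %bb2, single use
///   %p = phi i32 [ %x, %bb1 ], [ %y, %bb2 ]
/// becomes
///   %p.in = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]
///   %p = trunc i64 %p.in to i32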
Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
  // We cannot create a new instruction after the PHI if the terminator is an
  // EHPad because there is no valid insertion point.
  if (TerminatorInst *TI = PN.getParent()->getTerminator())
    if (TI->isEHPad())
      return nullptr;

  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));

  if (isa<GetElementPtrInst>(FirstInst))
    return FoldPHIArgGEPIntoPHI(PN);
  if (isa<LoadInst>(FirstInst))
    return FoldPHIArgLoadIntoPHI(PN);

  // Scan the instruction, looking for input operations that can be folded away.
  // If all input operands to the phi are the same instruction (e.g. a cast from
  // the same type or "+42") we can pull the operation through the PHI, reducing
  // code size and simplifying code.
  Constant *ConstantOp = nullptr;
  Type *CastSrcTy = nullptr;

  if (isa<CastInst>(FirstInst)) {
    CastSrcTy = FirstInst->getOperand(0)->getType();

    // Be careful about transforming integer PHIs. We don't want to pessimize
    // the code by turning an i32 into an i1293.
    if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
      if (!shouldChangeType(PN.getType(), CastSrcTy))
        return nullptr;
    }
  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
    // Can fold binop, compare or shift here if the RHS is a constant,
    // otherwise call FoldPHIArgBinOpIntoPHI.
    ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
    if (!ConstantOp)
      return FoldPHIArgBinOpIntoPHI(PN);
  } else {
    return nullptr; // Cannot fold this operation.
  }

  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
      return nullptr;
    if (CastSrcTy) {
      if (I->getOperand(0)->getType() != CastSrcTy)
        return nullptr; // Cast operation must match.
    } else if (I->getOperand(1) != ConstantOp) {
      return nullptr;
    }
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName() + ".in");

  Value *InVal = FirstInst->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
    PhiVal = NewPN;
  }

  // Insert and return the new operation.
  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
    CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
                                       PN.getType());
    PHIArgMergedDebugLoc(NewCI, PN);
    return NewCI;
  }

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
    BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
    BinOp->copyIRFlags(PN.getIncomingValue(0));

    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i)
      BinOp->andIRFlags(PN.getIncomingValue(i));

    PHIArgMergedDebugLoc(BinOp, PN);
    return BinOp;
  }

  CmpInst *CIOp = cast<CmpInst>(FirstInst);
  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                   PhiVal, ConstantOp);
  PHIArgMergedDebugLoc(NewCI, PN);
  return NewCI;
}

/// Return true if this PHI node is only used by a PHI node cycle that is dead.
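/// For example (illustrative, added for exposition):
///   %a = phi i32 [ %b, %bb2 ], ...   ; only user is %b
///   %b = phi i32 [ %a, %bb1 ], ...   ; only user is %a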
static bool DeadPHICycle(PHINode *PN,
                         SmallPtrSetImpl<PHINode*> &PotentiallyDeadPHIs) {
  if (PN->use_empty()) return true;
  if (!PN->hasOneUse()) return false;

  // Remember this node, and if we find the cycle, return.
  if (!PotentiallyDeadPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (PotentiallyDeadPHIs.size() == 16)
    return false;

  if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
    return DeadPHICycle(PU, PotentiallyDeadPHIs);

  return false;
}

/// Return true if this phi node is always equal to NonPhiInVal.
/// This happens with mutually cyclic phi nodes like:
///   z = some value; x = phi (y, z); y = phi (x, z)
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
                           SmallPtrSetImpl<PHINode*> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (ValueEqualPHIs.size() == 16)
    return false;

  // Scan the operands to see if they are either phi nodes or are equal to
  // the value.
  for (Value *Op : PN->incoming_values()) {
    if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
      if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
        return false;
    } else if (Op != NonPhiInVal)
      return false;
  }

  return true;
}

/// Return an existing non-zero constant if this phi node has one, otherwise
/// return constant 1.
static ConstantInt *GetAnyNonZeroConstInt(PHINode &PN) {
  assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
  for (Value *V : PN.operands())
    if (auto *ConstVA = dyn_cast<ConstantInt>(V))
      if (!ConstVA->isZero())
        return ConstVA;
  return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
}

namespace {

struct PHIUsageRecord {
  unsigned PHIId;     // The ID # of the PHI (something deterministic to sort on)
  unsigned Shift;     // The amount shifted.
  Instruction *Inst;  // The trunc instruction.

  PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User)
      : PHIId(pn), Shift(Sh), Inst(User) {}

  bool operator<(const PHIUsageRecord &RHS) const {
    if (PHIId < RHS.PHIId) return true;
    if (PHIId > RHS.PHIId) return false;
    if (Shift < RHS.Shift) return true;
    if (Shift > RHS.Shift) return false;
    return Inst->getType()->getPrimitiveSizeInBits() <
           RHS.Inst->getType()->getPrimitiveSizeInBits();
  }
};

struct LoweredPHIRecord {
  PHINode *PN;        // The PHI that was lowered.
  unsigned Shift;     // The amount shifted.
  unsigned Width;     // The width extracted.

  LoweredPHIRecord(PHINode *pn, unsigned Sh, Type *Ty)
      : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}

  // Ctor form used by DenseMap.
  LoweredPHIRecord(PHINode *pn, unsigned Sh)
      : PN(pn), Shift(Sh), Width(0) {}
};

} // end anonymous namespace

namespace llvm {
  template<>
  struct DenseMapInfo<LoweredPHIRecord> {
    static inline LoweredPHIRecord getEmptyKey() {
      return LoweredPHIRecord(nullptr, 0);
    }
    static inline LoweredPHIRecord getTombstoneKey() {
      return LoweredPHIRecord(nullptr, 1);
    }
    static unsigned getHashValue(const LoweredPHIRecord &Val) {
      return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
             (Val.Width>>3);
    }
    static bool isEqual(const LoweredPHIRecord &LHS,
                        const LoweredPHIRecord &RHS) {
      return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
             LHS.Width == RHS.Width;
    }
  };
} // end namespace llvm

/// This is an integer PHI and we know that it has an illegal type: see if it
/// is only used by trunc or trunc(lshr) operations. If so, we split the PHI
/// into the various pieces being extracted. This sort of thing is introduced
/// when SROA promotes an aggregate to large integer values.
///
/// TODO: The user of the trunc may be a bitcast to float/double/vector or an
/// inttoptr. We should produce new PHIs in the right type.
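///
/// A sketch of the slicing (illustrative, added for exposition):
///   %p = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]   ; i64 illegal for the target
///   %lo = trunc i64 %p to i32
///   %hi.s = lshr i64 %p, 32
///   %hi = trunc i64 %hi.s to i32
/// becomes two legal PHIs of pieces extracted in the predecessors:
///   %p.off0 = phi i32 [ %a.lo, %bb1 ], [ %b.lo, %bb2 ]
///   %p.off32 = phi i32 [ %a.hi, %bb1 ], [ %b.hi, %bb2 ]
///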
Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
  // PHIUsers - Keep track of all of the truncated values extracted from a set
  // of PHIs, along with their offset. These are the things we want to rewrite.
  SmallVector<PHIUsageRecord, 16> PHIUsers;

  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
  // nodes which are extracted from. PHIsInspected is a set we use to avoid
  // revisiting PHIs; PHIsToSlice is an ordered list of PHIs that we need to
  // check the uses of (to ensure they are all extracts).
  SmallVector<PHINode*, 8> PHIsToSlice;
  SmallPtrSet<PHINode*, 8> PHIsInspected;

  PHIsToSlice.push_back(&FirstPhi);
  PHIsInspected.insert(&FirstPhi);

  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
    PHINode *PN = PHIsToSlice[PHIId];

    // Scan the input list of the PHI. If any input is an invoke, and if the
    // input is defined in the predecessor, then we won't be able to split the
    // critical edge which is required to insert a truncate. Because of this,
    // we have to bail out.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i));
      if (!II) continue;
      if (II->getParent() != PN->getIncomingBlock(i))
        continue;

      // If we have a phi, and if it's directly in the predecessor, then we
      // have a critical edge where we need to put the truncate. Since we
      // can't split the edge in instcombine, we have to bail out.
      return nullptr;
    }

    for (User *U : PN->users()) {
      Instruction *UserI = cast<Instruction>(U);

      // If the user is a PHI, inspect its uses recursively.
      if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
        if (PHIsInspected.insert(UserPN).second)
          PHIsToSlice.push_back(UserPN);
        continue;
      }

      // Truncates are always ok.
      if (isa<TruncInst>(UserI)) {
        PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
        continue;
      }

      // Otherwise it must be a lshr which can only be used by one trunc.
      if (UserI->getOpcode() != Instruction::LShr ||
          !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
          !isa<ConstantInt>(UserI->getOperand(1)))
        return nullptr;

      unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
      PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
    }
  }

  // If we have no users, they must be all self uses, just nuke the PHI.
  if (PHIUsers.empty())
    return replaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));

  // If this phi node is transformable, create new PHIs for all the pieces
  // extracted out of it. First, sort the users by their offset and size.
  array_pod_sort(PHIUsers.begin(), PHIUsers.end());

  DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
        for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
          dbgs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] << '\n';);

  // PredValues - This is a temporary used when rewriting PHI nodes. It is
  // hoisted out here to avoid construction/destruction thrashing.
  DenseMap<BasicBlock*, Value*> PredValues;

  // ExtractedVals - Each new PHI we introduce is saved here so we don't
  // introduce redundant PHIs.
  DenseMap<LoweredPHIRecord, PHINode*> ExtractedVals;

  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
    unsigned PHIId = PHIUsers[UserI].PHIId;
    PHINode *PN = PHIsToSlice[PHIId];
    unsigned Offset = PHIUsers[UserI].Shift;
    Type *Ty = PHIUsers[UserI].Inst->getType();

    PHINode *EltPHI;

    // If we've already lowered a user like this, reuse the previously lowered
    // value.
    if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {

      // Otherwise, create the new PHI node for this user.
      EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
                               PN->getName() + ".off" + Twine(Offset), PN);
      assert(EltPHI->getType() != PN->getType() &&
             "Truncate didn't shrink phi?");

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        BasicBlock *Pred = PN->getIncomingBlock(i);
        Value *&PredVal = PredValues[Pred];

        // If we already have a value for this predecessor, reuse it.
        if (PredVal) {
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        // Handle the PHI self-reuse case.
        Value *InVal = PN->getIncomingValue(i);
        if (InVal == PN) {
          PredVal = EltPHI;
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        if (PHINode *InPHI = dyn_cast<PHINode>(InVal)) {
          // If the incoming value is a PHI, and if it is one of the PHIs we
          // already rewrote, just use the lowered value.
          if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
            PredVal = Res;
            EltPHI->addIncoming(PredVal, Pred);
            continue;
          }
        }

        // Otherwise, do an extract in the predecessor.
        Builder.SetInsertPoint(Pred->getTerminator());
        Value *Res = InVal;
        if (Offset)
          Res = Builder.CreateLShr(Res, ConstantInt::get(InVal->getType(),
                                                         Offset), "extract");
        Res = Builder.CreateTrunc(Res, Ty, "extract.t");
        PredVal = Res;
        EltPHI->addIncoming(Res, Pred);

        // If the incoming value was a PHI, and if it was one of the PHIs we
        // are rewriting, we will ultimately delete the code we inserted. This
        // means we need to revisit that PHI to make sure we extract out the
        // needed piece.
        if (PHINode *OldInVal = dyn_cast<PHINode>(PN->getIncomingValue(i)))
          if (PHIsInspected.count(OldInVal)) {
            unsigned RefPHIId =
                find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
            PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset,
                                              cast<Instruction>(Res)));
            ++UserE;
          }
      }
      PredValues.clear();

      DEBUG(dbgs() << "  Made element PHI for offset " << Offset << ": "
                   << *EltPHI << '\n');
      ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
    }

    // Replace the use of this piece with the PHI node.
    replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
  }

  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
  // with undefs.
  Value *Undef = UndefValue::get(FirstPhi.getType());
  for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
    replaceInstUsesWith(*PHIsToSlice[i], Undef);
  return replaceInstUsesWith(FirstPhi, Undef);
}

// PHINode simplification
//
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
  if (Value *V = SimplifyInstruction(&PN, SQ.getWithInstruction(&PN)))
    return replaceInstUsesWith(PN, V);

  if (Instruction *Result = FoldPHIArgZextsIntoPHI(PN))
    return Result;

  // If all PHI operands are the same operation, pull them through the PHI,
  // reducing code size.
  if (isa<Instruction>(PN.getIncomingValue(0)) &&
      isa<Instruction>(PN.getIncomingValue(1)) &&
      cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
          cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
      // FIXME: The hasOneUse check will fail for PHIs that use the value more
      // than once.
      PN.getIncomingValue(0)->hasOneUse())
    if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
      return Result;

  // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
  // PHI)... break the cycle.
  if (PN.hasOneUse()) {
    if (Instruction *Result = FoldIntegerTypedPHI(PN))
      return Result;

    Instruction *PHIUser = cast<Instruction>(PN.user_back());
    if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
      SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
      PotentiallyDeadPHIs.insert(&PN);
      if (DeadPHICycle(PU, PotentiallyDeadPHIs))
        return replaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi. This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late.
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->user_back() == &PN) {
      return replaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }
    // When a PHI is used only to be compared with zero, it is safe to replace
    // an incoming value proved as known nonzero with any non-zero constant.
    // For example, in the code below, the incoming value %v can be replaced
    // with any non-zero constant based on the fact that the PHI is only used
    // to be compared with zero and %v is a known non-zero value:
    // %v = select %cond, 1, 2
    // %p = phi [%v, BB] ...
    //      icmp eq, %p, 0
    auto *CmpInst = dyn_cast<ICmpInst>(PHIUser);
    // FIXME: To be simple, handle only integer type for now.
    if (CmpInst && isa<IntegerType>(PN.getType()) && CmpInst->isEquality() &&
        match(CmpInst->getOperand(1), m_Zero())) {
      ConstantInt *NonZeroConst = nullptr;
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
        Instruction *CtxI = PN.getIncomingBlock(i)->getTerminator();
        Value *VA = PN.getIncomingValue(i);
        if (isKnownNonZero(VA, DL, 0, &AC, CtxI, &DT)) {
          if (!NonZeroConst)
            NonZeroConst = GetAnyNonZeroConstInt(PN);
          PN.setIncomingValue(i, NonZeroConst);
        }
      }
    }
  }

  // We sometimes end up with phi cycles that non-obviously end up being the
  // same value, for example:
  //   z = some value; x = phi (y, z); y = phi (x, z)
  // where the phi nodes don't necessarily need to be in the same block. Do a
  // quick check to see if the PHI node only contains a single non-phi value;
  // if so, scan to see if the phi cycle is actually equal to that value.
  {
    unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
    // Scan for the first non-phi operand.
    while (InValNo != NumIncomingVals &&
           isa<PHINode>(PN.getIncomingValue(InValNo)))
      ++InValNo;

    if (InValNo != NumIncomingVals) {
      Value *NonPhiInVal = PN.getIncomingValue(InValNo);

      // Scan the rest of the operands to see if there are any conflicts; if so
      // there is no need to recursively scan other phis.
      for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
        Value *OpVal = PN.getIncomingValue(InValNo);
        if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
          break;
      }

      // If we scanned over all operands, then we have one unique value plus
      // phi values. Scan PHI nodes to see if they all merge in each other or
      // the value.
      if (InValNo == NumIncomingVals) {
        SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
        if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
          return replaceInstUsesWith(PN, NonPhiInVal);
      }
    }
  }

  // If there are multiple PHIs, sort their operands so that they all list
  // the blocks in the same order. This will help identical PHIs be eliminated
  // by other passes. Other passes shouldn't depend on this for correctness
  // however.
  PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
  if (&PN != FirstPN)
    for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
      BasicBlock *BBA = PN.getIncomingBlock(i);
      BasicBlock *BBB = FirstPN->getIncomingBlock(i);
      if (BBA != BBB) {
        Value *VA = PN.getIncomingValue(i);
        unsigned j = PN.getBasicBlockIndex(BBB);
        Value *VB = PN.getIncomingValue(j);
        PN.setIncomingBlock(i, BBB);
        PN.setIncomingValue(i, VB);
        PN.setIncomingBlock(j, BBA);
        PN.setIncomingValue(j, VA);
        // NOTE: Instcombine normally would want us to "return &PN" if we
        // modified any of the operands of an instruction. However, since we
        // aren't adding or removing uses (just rearranging them) we don't do
        // this in this case.
      }
    }

  // If this is an integer PHI and we know that it has an illegal type, see if
  // it is only used by trunc or trunc(lshr) operations. If so, we split the
  // PHI into the various pieces being extracted. This sort of thing is
  // introduced when SROA promotes an aggregate to a single large integer type.
  if (PN.getType()->isIntegerTy() &&
      !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
    if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
      return Res;

  return nullptr;
}