//===- InstCombinePHI.cpp ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitPHINode function.
//
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/DebugInfo.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"
/// The PHI arguments will be folded into a single operation with a PHI node
/// as input. The debug location of the single operation will be the merged
/// locations of the original PHI node arguments.
DebugLoc InstCombiner::PHIArgMergedDebugLoc(PHINode &PN) {
  auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  const DILocation *Loc = FirstInst->getDebugLoc();

  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    auto *I = cast<Instruction>(PN.getIncomingValue(i));
    Loc = DILocation::getMergedLocation(Loc, I->getDebugLoc());
  }

  return Loc;
}
/// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
/// adds all have a single use, turn this into a phi and a single binop.
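///
/// For example (block and value names are illustrative only):
///   BB1: %a = add i32 %x, %s
///   BB2: %b = add i32 %x, %t
///   ...  %p = phi i32 [ %a, %BB1 ], [ %b, %BB2 ]
/// becomes a phi of only the differing operands feeding one add:
///   %p.pn = phi i32 [ %s, %BB1 ], [ %t, %BB2 ]
///   %p = add i32 %x, %p.pn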
Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
  unsigned Opc = FirstInst->getOpcode();
  Value *LHSVal = FirstInst->getOperand(0);
  Value *RHSVal = FirstInst->getOperand(1);

  Type *LHSType = LHSVal->getType();
  Type *RHSType = RHSVal->getType();

  // Scan to see if all operands are the same opcode, and all have one use.
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
        // Verify type of the LHS matches so we don't fold cmp's of different
        // types.
        I->getOperand(0)->getType() != LHSType ||
        I->getOperand(1)->getType() != RHSType)
      return nullptr;

    // If they are CmpInst instructions, check their predicates
    if (CmpInst *CI = dyn_cast<CmpInst>(I))
      if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
        return nullptr;

    // Keep track of which operand needs a phi node.
    if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
    if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
  }

  // If both LHS and RHS would need a PHI, don't do this transformation,
  // because it would increase the number of PHIs entering the block,
  // which leads to higher register pressure. This is especially
  // bad when the PHIs are in the header of a loop.
  if (!LHSVal && !RHSVal)
    return nullptr;

  // Otherwise, this is safe to transform!

  Value *InLHS = FirstInst->getOperand(0);
  Value *InRHS = FirstInst->getOperand(1);
  PHINode *NewLHS = nullptr, *NewRHS = nullptr;
  if (!LHSVal) {
    NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(0)->getName() + ".pn");
    NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewLHS, PN);
    LHSVal = NewLHS;
  }

  if (!RHSVal) {
    NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(1)->getName() + ".pn");
    NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewRHS, PN);
    RHSVal = NewRHS;
  }

  // Add all operands to the new PHIs.
  if (NewLHS || NewRHS) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
      if (NewLHS) {
        Value *NewInLHS = InInst->getOperand(0);
        NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
      }
      if (NewRHS) {
        Value *NewInRHS = InInst->getOperand(1);
        NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
      }
    }
  }

  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
    CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                     LHSVal, RHSVal);
    NewCI->setDebugLoc(PHIArgMergedDebugLoc(PN));
    return NewCI;
  }

  BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
  BinaryOperator *NewBinOp =
    BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);

  NewBinOp->copyIRFlags(PN.getIncomingValue(0));

  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i)
    NewBinOp->andIRFlags(PN.getIncomingValue(i));

  NewBinOp->setDebugLoc(PHIArgMergedDebugLoc(PN));
  return NewBinOp;
}

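/// If all incoming values of a phi are GEPs with the same shape that differ
/// in at most one operand, merge them: phi the differing operand and emit a
/// single GEP after the phi. A sketch, with illustrative names:
///   BB1: %g1 = getelementptr i32, i32* %base, i64 %i
///   BB2: %g2 = getelementptr i32, i32* %base, i64 %j
///   ...  %p = phi i32* [ %g1, %BB1 ], [ %g2, %BB2 ]
/// becomes:
///   %idx.pn = phi i64 [ %i, %BB1 ], [ %j, %BB2 ]
///   %p = getelementptr i32, i32* %base, i64 %idx.pn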
Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
  GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));

  SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
                                        FirstInst->op_end());

  // This is true if all GEP bases are allocas and if all indices into them are
  // constants.
  bool AllBasePointersAreAllocas = true;

  // We don't want to replace this phi if the replacement would require
  // more than one phi, which leads to higher register pressure. This is
  // especially bad when the PHIs are in the header of a loop.
  bool NeededPhi = false;

  bool AllInBounds = true;

  // Scan to see if all operands are the same opcode, and all have one use.
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
    if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
        GEP->getNumOperands() != FirstInst->getNumOperands())
      return nullptr;

    AllInBounds &= GEP->isInBounds();

    // Keep track of whether or not all GEPs are of alloca pointers.
    if (AllBasePointersAreAllocas &&
        (!isa<AllocaInst>(GEP->getOperand(0)) ||
         !GEP->hasAllConstantIndices()))
      AllBasePointersAreAllocas = false;

    // Compare the operand lists.
    for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
      if (FirstInst->getOperand(op) == GEP->getOperand(op))
        continue;

      // Don't merge two GEPs when two operands differ (introducing phi nodes)
      // if one of the PHIs has a constant for the index. The index may be
      // substantially cheaper to compute for the constants, so making it a
      // variable index could pessimize the path. This also handles the case
      // for struct indices, which must always be constant.
      if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
          isa<ConstantInt>(GEP->getOperand(op)))
        return nullptr;

      if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
        return nullptr;

      // If we already needed a PHI for an earlier operand, and another operand
      // also requires a PHI, we'd be introducing more PHIs than we're
      // eliminating, which increases register pressure on entry to the PHI's
      // block.
      if (NeededPhi)
        return nullptr;

      FixedOperands[op] = nullptr; // Needs a PHI.
      NeededPhi = true;
    }
  }

  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
  // bother doing this transformation. At best, this will just save a bit of
  // offset calculation, but all the predecessors will have to materialize the
  // stack address into a register anyway. We'd actually rather *clone* the
  // load up into the predecessors so that we have a load of a gep of an alloca,
  // which can usually all be folded into the load.
  if (AllBasePointersAreAllocas)
    return nullptr;

  // Otherwise, this is safe to transform. Insert PHI nodes for each operand
  // that is variable.
  SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());

  bool HasAnyPHIs = false;
  for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
    if (FixedOperands[i]) continue; // operand doesn't need a phi.
    Value *FirstOp = FirstInst->getOperand(i);
    PHINode *NewPN = PHINode::Create(FirstOp->getType(), e,
                                     FirstOp->getName()+".pn");
    InsertNewInstBefore(NewPN, PN);

    NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
    OperandPhis[i] = NewPN;
    FixedOperands[i] = NewPN;
    HasAnyPHIs = true;
  }

  // Add all operands to the new PHIs.
  if (HasAnyPHIs) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i));
      BasicBlock *InBB = PN.getIncomingBlock(i);

      for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
        if (PHINode *OpPhi = OperandPhis[op])
          OpPhi->addIncoming(InGEP->getOperand(op), InBB);
    }
  }

  Value *Base = FixedOperands[0];
  GetElementPtrInst *NewGEP =
      GetElementPtrInst::Create(FirstInst->getSourceElementType(), Base,
                                makeArrayRef(FixedOperands).slice(1));
  if (AllInBounds) NewGEP->setIsInBounds();
  NewGEP->setDebugLoc(PHIArgMergedDebugLoc(PN));
  return NewGEP;
}

/// Return true if we know that it is safe to sink the load out of the block
/// that defines it. This means that it must be obvious the value of the load is
/// not changed from the point of the load to the end of the block it is in.
///
/// Finally, it is safe, but not profitable, to sink a load targeting a
/// non-address-taken alloca. Doing so will cause us to not promote the alloca
/// to a register.
static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
  BasicBlock::iterator BBI = L->getIterator(), E = L->getParent()->end();

  for (++BBI; BBI != E; ++BBI)
    if (BBI->mayWriteToMemory())
      return false;

  // Check for non-address taken alloca. If not address-taken already, it isn't
  // profitable to do this xform.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
    bool isAddressTaken = false;
    for (User *U : AI->users()) {
      if (isa<LoadInst>(U)) continue;
      if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        // If storing TO the alloca, then the address isn't taken.
        if (SI->getOperand(1) == AI) continue;
      }
      isAddressTaken = true;
      break;
    }

    if (!isAddressTaken && AI->isStaticAlloca())
      return false;
  }

  // If this load is a load from a GEP with a constant offset from an alloca,
  // then we don't want to sink it. In its present form, it will be
  // load [constant stack offset]. Sinking it will cause us to have to
  // materialize the stack addresses in each predecessor in a register only to
  // do a shared load from register in the successor.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
    if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
      if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
        return false;

  return true;
}

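/// If all incoming values of a phi are loads that may safely be sunk out of
/// their blocks, replace them with one load from a phi of the pointers.
/// A sketch, with illustrative names:
///   BB1: %v1 = load i32, i32* %p1
///   BB2: %v2 = load i32, i32* %p2
///   ...  %r = phi i32 [ %v1, %BB1 ], [ %v2, %BB2 ]
/// becomes:
///   %r.in = phi i32* [ %p1, %BB1 ], [ %p2, %BB2 ]
///   %r = load i32, i32* %r.in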
Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));

  // FIXME: This is overconservative; this transform is allowed in some cases
  // for atomic operations.
  if (FirstLI->isAtomic())
    return nullptr;

  // When processing loads, we need to propagate two bits of information to the
  // sunk load: whether it is volatile, and what its alignment is. We currently
  // don't sink loads when some have their alignment specified and some don't.
  // visitLoadInst will propagate an alignment onto the load when TD is around,
  // and if TD isn't around, we can't handle the mixed case.
  bool isVolatile = FirstLI->isVolatile();
  unsigned LoadAlignment = FirstLI->getAlignment();
  unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();

  // We can't sink the load if the loaded value could be modified between the
  // load and the PHI.
  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
      !isSafeAndProfitableToSinkLoad(FirstLI))
    return nullptr;

  // If the PHI is of volatile loads and the load block has multiple
  // successors, sinking it would remove a load of the volatile value from
  // the path through the other successor.
  if (isVolatile &&
      FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
    return nullptr;

  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
    if (!LI || !LI->hasOneUse())
      return nullptr;

    // We can't sink the load if the loaded value could be modified between
    // the load and the PHI.
    if (LI->isVolatile() != isVolatile ||
        LI->getParent() != PN.getIncomingBlock(i) ||
        LI->getPointerAddressSpace() != LoadAddrSpace ||
        !isSafeAndProfitableToSinkLoad(LI))
      return nullptr;

    // If some of the loads have an alignment specified but not all of them,
    // we can't do the transformation.
    if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
      return nullptr;

    LoadAlignment = std::min(LoadAlignment, LI->getAlignment());

    // If the PHI is of volatile loads and the load block has multiple
    // successors, sinking it would remove a load of the volatile value from
    // the path through the other successor.
    if (isVolatile &&
        LI->getParent()->getTerminator()->getNumSuccessors() != 1)
      return nullptr;
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName()+".in");

  Value *InVal = FirstLI->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
  LoadInst *NewLI = new LoadInst(NewPN, "", isVolatile, LoadAlignment);

  unsigned KnownIDs[] = {
    LLVMContext::MD_tbaa,
    LLVMContext::MD_range,
    LLVMContext::MD_invariant_load,
    LLVMContext::MD_alias_scope,
    LLVMContext::MD_noalias,
    LLVMContext::MD_nonnull,
    LLVMContext::MD_align,
    LLVMContext::MD_dereferenceable,
    LLVMContext::MD_dereferenceable_or_null,
  };

  for (unsigned ID : KnownIDs)
    NewLI->setMetadata(ID, FirstLI->getMetadata(ID));

  // Add all operands to the new PHI and combine TBAA metadata.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    LoadInst *LI = cast<LoadInst>(PN.getIncomingValue(i));
    combineMetadata(NewLI, LI, KnownIDs);
    Value *NewInVal = LI->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    NewLI->setOperand(0, InVal);
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
  }

  // If this was a volatile load that we are merging, make sure to loop through
  // and mark all the input loads as non-volatile. If we don't do this, we will
  // insert a new volatile load and the old ones will not be deletable.
  if (isVolatile)
    for (Value *IncValue : PN.incoming_values())
      cast<LoadInst>(IncValue)->setVolatile(false);

  NewLI->setDebugLoc(PHIArgMergedDebugLoc(PN));
  return NewLI;
}

/// TODO: This function could handle other cast types, but then it might
/// require special-casing a cast from the 'i1' type. See the comment in
/// FoldPHIArgOpIntoPHI() about pessimizing illegal integer types.
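///
/// For example (illustrative names), given zexts from i8 and a constant that
/// fits in i8:
///   %p = phi i32 [ %z1, %BB1 ], [ %z2, %BB2 ], [ 7, %BB3 ]
/// where %z1/%z2 are 'zext i8 %x1/%x2 to i32', this becomes:
///   %p.shrunk = phi i8 [ %x1, %BB1 ], [ %x2, %BB2 ], [ 7, %BB3 ]
///   %p = zext i8 %p.shrunk to i32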
Instruction *InstCombiner::FoldPHIArgZextsIntoPHI(PHINode &Phi) {
  // We cannot create a new instruction after the PHI if the terminator is an
  // EHPad because there is no valid insertion point.
  if (TerminatorInst *TI = Phi.getParent()->getTerminator())
    if (TI->isEHPad())
      return nullptr;

  // Early exit for the common case of a phi with two operands. These are
  // handled elsewhere. See the comment below where we check the count of zexts
  // and constants for more details.
  unsigned NumIncomingValues = Phi.getNumIncomingValues();
  if (NumIncomingValues < 3)
    return nullptr;

  // Find the narrower type specified by the first zext.
  Type *NarrowType = nullptr;
  for (Value *V : Phi.incoming_values()) {
    if (auto *Zext = dyn_cast<ZExtInst>(V)) {
      NarrowType = Zext->getSrcTy();
      break;
    }
  }
  if (!NarrowType)
    return nullptr;

  // Walk the phi operands checking that we only have zexts or constants that
  // we can shrink for free. Store the new operands for the new phi.
  SmallVector<Value *, 4> NewIncoming;
  unsigned NumZexts = 0;
  unsigned NumConsts = 0;
  for (Value *V : Phi.incoming_values()) {
    if (auto *Zext = dyn_cast<ZExtInst>(V)) {
      // All zexts must be identical and have one use.
      if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUse())
        return nullptr;
      NewIncoming.push_back(Zext->getOperand(0));
      NumZexts++;
    } else if (auto *C = dyn_cast<Constant>(V)) {
      // Make sure that constants can fit in the new type.
      Constant *Trunc = ConstantExpr::getTrunc(C, NarrowType);
      if (ConstantExpr::getZExt(Trunc, C->getType()) != C)
        return nullptr;
      NewIncoming.push_back(Trunc);
      NumConsts++;
    } else {
      // If it's not a cast or a constant, bail out.
      return nullptr;
    }
  }

  // The more common cases of a phi with no constant operands or just one
  // variable operand are handled by FoldPHIArgOpIntoPHI() and FoldOpIntoPhi()
  // respectively. FoldOpIntoPhi() wants to do the opposite transform that is
  // performed here. It tries to replicate a cast in the phi operand's basic
  // block to expose other folding opportunities. Thus, InstCombine will
  // infinite loop without this check.
  if (NumConsts == 0 || NumZexts < 2)
    return nullptr;

  // All incoming values are zexts or constants that are safe to truncate.
  // Create a new phi node of the narrow type, phi together all of the new
  // operands, and zext the result back to the original type.
  PHINode *NewPhi = PHINode::Create(NarrowType, NumIncomingValues,
                                    Phi.getName() + ".shrunk");
  for (unsigned i = 0; i != NumIncomingValues; ++i)
    NewPhi->addIncoming(NewIncoming[i], Phi.getIncomingBlock(i));

  InsertNewInstBefore(NewPhi, Phi);
  return CastInst::CreateZExtOrBitCast(NewPhi, Phi.getType());
}

/// If all operands to a PHI node are the same "unary" operator and they all are
/// only used by the PHI, PHI together their inputs, and do the operation once,
/// to the result of the PHI.
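///
/// For example (illustrative names), a phi of two identical sexts:
///   BB1: %a = sext i16 %x to i32
///   BB2: %b = sext i16 %y to i32
///   ...  %p = phi i32 [ %a, %BB1 ], [ %b, %BB2 ]
/// becomes:
///   %p.in = phi i16 [ %x, %BB1 ], [ %y, %BB2 ]
///   %p = sext i16 %p.in to i32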
Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
  // We cannot create a new instruction after the PHI if the terminator is an
  // EHPad because there is no valid insertion point.
  if (TerminatorInst *TI = PN.getParent()->getTerminator())
    if (TI->isEHPad())
      return nullptr;

  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));

  if (isa<GetElementPtrInst>(FirstInst))
    return FoldPHIArgGEPIntoPHI(PN);
  if (isa<LoadInst>(FirstInst))
    return FoldPHIArgLoadIntoPHI(PN);

  // Scan the instruction, looking for input operations that can be folded away.
  // If all input operands to the phi are the same instruction (e.g. a cast from
  // the same type or "+42") we can pull the operation through the PHI, reducing
  // code size and simplifying code.
  Constant *ConstantOp = nullptr;
  Type *CastSrcTy = nullptr;

  if (isa<CastInst>(FirstInst)) {
    CastSrcTy = FirstInst->getOperand(0)->getType();

    // Be careful about transforming integer PHIs. We don't want to pessimize
    // the code by turning an i32 into an i1293.
    if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
      if (!ShouldChangeType(PN.getType(), CastSrcTy))
        return nullptr;
    }
  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
    // Can fold binop, compare or shift here if the RHS is a constant,
    // otherwise call FoldPHIArgBinOpIntoPHI.
    ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
    if (!ConstantOp)
      return FoldPHIArgBinOpIntoPHI(PN);
  } else {
    return nullptr; // Cannot fold this operation.
  }

  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
      return nullptr;
    if (CastSrcTy) {
      if (I->getOperand(0)->getType() != CastSrcTy)
        return nullptr; // Cast operation must match.
    } else if (I->getOperand(1) != ConstantOp) {
      return nullptr;
    }
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName()+".in");

  Value *InVal = FirstInst->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    PhiVal = NewPN;
    InsertNewInstBefore(NewPN, PN);
  }

  // Insert and return the new operation.
  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
    CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
                                       PN.getType());
    NewCI->setDebugLoc(PHIArgMergedDebugLoc(PN));
    return NewCI;
  }

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
    BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
    BinOp->copyIRFlags(PN.getIncomingValue(0));

    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i)
      BinOp->andIRFlags(PN.getIncomingValue(i));

    BinOp->setDebugLoc(PHIArgMergedDebugLoc(PN));
    return BinOp;
  }

  CmpInst *CIOp = cast<CmpInst>(FirstInst);
  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                   PhiVal, ConstantOp);
  NewCI->setDebugLoc(PHIArgMergedDebugLoc(PN));
  return NewCI;
}

/// Return true if this PHI node is only used by a PHI node cycle that is dead.
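/// For example (illustrative), %x and %y below form a dead cycle if neither
/// has any other use:
///   %x = phi i32 [ %y, ... ]   ; only user is %y
///   %y = phi i32 [ %x, ... ]   ; only user is %x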
static bool DeadPHICycle(PHINode *PN,
                         SmallPtrSetImpl<PHINode*> &PotentiallyDeadPHIs) {
  if (PN->use_empty()) return true;
  if (!PN->hasOneUse()) return false;

  // Remember this node, and if we find the cycle, return.
  if (!PotentiallyDeadPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (PotentiallyDeadPHIs.size() == 16)
    return false;

  if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
    return DeadPHICycle(PU, PotentiallyDeadPHIs);

  return false;
}

/// Return true if this phi node is always equal to NonPhiInVal.
/// This happens with mutually cyclic phi nodes like:
///   z = some value; x = phi (y, z); y = phi (x, z)
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
                           SmallPtrSetImpl<PHINode*> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (ValueEqualPHIs.size() == 16)
    return false;

  // Scan the operands to see if they are either phi nodes or are equal to
  // the value.
  for (Value *Op : PN->incoming_values()) {
    if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
      if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
        return false;
    } else if (Op != NonPhiInVal)
      return false;
  }

  return true;
}

/// Return an existing non-zero constant if this phi node has one, otherwise
/// return constant 1.
static ConstantInt *GetAnyNonZeroConstInt(PHINode &PN) {
  assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
  for (Value *V : PN.operands())
    if (auto *ConstVA = dyn_cast<ConstantInt>(V))
      if (!ConstVA->isZeroValue())
        return ConstVA;
  return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
}

namespace {
struct PHIUsageRecord {
  unsigned PHIId;     // The ID # of the PHI (something deterministic to sort on)
  unsigned Shift;     // The amount shifted.
  Instruction *Inst;  // The trunc instruction.

  PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User)
    : PHIId(pn), Shift(Sh), Inst(User) {}

  bool operator<(const PHIUsageRecord &RHS) const {
    if (PHIId < RHS.PHIId) return true;
    if (PHIId > RHS.PHIId) return false;
    if (Shift < RHS.Shift) return true;
    if (Shift > RHS.Shift) return false;
    return Inst->getType()->getPrimitiveSizeInBits() <
           RHS.Inst->getType()->getPrimitiveSizeInBits();
  }
};

struct LoweredPHIRecord {
  PHINode *PN;        // The PHI that was lowered.
  unsigned Shift;     // The amount shifted.
  unsigned Width;     // The width extracted.

  LoweredPHIRecord(PHINode *pn, unsigned Sh, Type *Ty)
    : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}

  // Ctor form used by DenseMap.
  LoweredPHIRecord(PHINode *pn, unsigned Sh)
    : PN(pn), Shift(Sh), Width(0) {}
};
}

namespace llvm {
  template<>
  struct DenseMapInfo<LoweredPHIRecord> {
    static inline LoweredPHIRecord getEmptyKey() {
      return LoweredPHIRecord(nullptr, 0);
    }
    static inline LoweredPHIRecord getTombstoneKey() {
      return LoweredPHIRecord(nullptr, 1);
    }
    static unsigned getHashValue(const LoweredPHIRecord &Val) {
      return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
             (Val.Width>>3);
    }
    static bool isEqual(const LoweredPHIRecord &LHS,
                        const LoweredPHIRecord &RHS) {
      return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
             LHS.Width == RHS.Width;
    }
  };
}

/// This is an integer PHI and we know that it has an illegal type: see if it is
/// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
/// the various pieces being extracted. This sort of thing is introduced when
/// SROA promotes an aggregate to large integer values.
///
/// TODO: The user of the trunc may be a bitcast to float/double/vector or an
/// inttoptr. We should produce new PHIs in the right type.
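///
/// For example (illustrative names), if i64 is illegal on the target:
///   %p  = phi i64 [ %a, %BB1 ], [ %b, %BB2 ]
///   %lo = trunc i64 %p to i32
///   %t  = lshr i64 %p, 32
///   %hi = trunc i64 %t to i32
/// is rewritten as two i32 PHIs, one per extracted piece, with the lshr+trunc
/// performed in each predecessor instead.
///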
Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
  // PHIUsers - Keep track of all of the truncated values extracted from a set
  // of PHIs, along with their offset. These are the things we want to rewrite.
  SmallVector<PHIUsageRecord, 16> PHIUsers;

  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
  // nodes which are extracted from. PHIsInspected is a set we use to avoid
  // revisiting PHIs; PHIsToSlice is an ordered list of PHIs that we need to
  // check the uses of (to ensure they are all extracts).
  SmallVector<PHINode*, 8> PHIsToSlice;
  SmallPtrSet<PHINode*, 8> PHIsInspected;

  PHIsToSlice.push_back(&FirstPhi);
  PHIsInspected.insert(&FirstPhi);

  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
    PHINode *PN = PHIsToSlice[PHIId];

    // Scan the input list of the PHI. If any input is an invoke, and if the
    // input is defined in the predecessor, then we won't be able to split the
    // critical edge which is required to insert a truncate. Because of this,
    // we have to bail out.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i));
      if (!II) continue;
      if (II->getParent() != PN->getIncomingBlock(i))
        continue;

      // If we have a phi, and if it's directly in the predecessor, then we have
      // a critical edge where we need to put the truncate. Since we can't
      // split the edge in instcombine, we have to bail out.
      return nullptr;
    }

    for (User *U : PN->users()) {
      Instruction *UserI = cast<Instruction>(U);

      // If the user is a PHI, inspect its uses recursively.
      if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
        if (PHIsInspected.insert(UserPN).second)
          PHIsToSlice.push_back(UserPN);
        continue;
      }

      // Truncates are always ok.
      if (isa<TruncInst>(UserI)) {
        PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
        continue;
      }

      // Otherwise it must be a lshr which can only be used by one trunc.
      if (UserI->getOpcode() != Instruction::LShr ||
          !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
          !isa<ConstantInt>(UserI->getOperand(1)))
        return nullptr;

      unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
      PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
    }
  }

  // If we have no users, they must be all self uses, just nuke the PHI.
  if (PHIUsers.empty())
    return replaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));

  // If this phi node is transformable, create new PHIs for all the pieces
  // extracted out of it. First, sort the users by their offset and size.
  array_pod_sort(PHIUsers.begin(), PHIUsers.end());

  DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
        for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
          dbgs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] << '\n';
        );

  // PredValues - This is a temporary used when rewriting PHI nodes. It is
  // hoisted out here to avoid construction/destruction thrashing.
  DenseMap<BasicBlock*, Value*> PredValues;

  // ExtractedVals - Each new PHI we introduce is saved here so we don't
  // introduce redundant PHIs.
  DenseMap<LoweredPHIRecord, PHINode*> ExtractedVals;

  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
    unsigned PHIId = PHIUsers[UserI].PHIId;
    PHINode *PN = PHIsToSlice[PHIId];
    unsigned Offset = PHIUsers[UserI].Shift;
    Type *Ty = PHIUsers[UserI].Inst->getType();

    PHINode *EltPHI;

    // If we've already lowered a user like this, reuse the previously lowered
    // value.
    if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {
      // Otherwise, create the new PHI node for this user.
      EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
                               PN->getName()+".off"+Twine(Offset), PN);
      assert(EltPHI->getType() != PN->getType() &&
             "Truncate didn't shrink phi?");

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        BasicBlock *Pred = PN->getIncomingBlock(i);
        Value *&PredVal = PredValues[Pred];

        // If we already have a value for this predecessor, reuse it.
        if (PredVal) {
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        // Handle the PHI self-reuse case.
        Value *InVal = PN->getIncomingValue(i);
        if (InVal == PN) {
          PredVal = EltPHI;
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        if (PHINode *InPHI = dyn_cast<PHINode>(PN)) {
          // If the incoming value was a PHI, and if it was one of the PHIs we
          // already rewrote, just use the lowered value.
          if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
            PredVal = Res;
            EltPHI->addIncoming(PredVal, Pred);
            continue;
          }
        }

        // Otherwise, do an extract in the predecessor.
        Builder->SetInsertPoint(Pred->getTerminator());
        Value *Res = InVal;
        if (Offset)
          Res = Builder->CreateLShr(Res, ConstantInt::get(InVal->getType(),
                                                          Offset), "extract");
        Res = Builder->CreateTrunc(Res, Ty, "extract.t");
        PredVal = Res;
        EltPHI->addIncoming(Res, Pred);

        // If the incoming value was a PHI, and if it was one of the PHIs we are
        // rewriting, we will ultimately delete the code we inserted. This
        // means we need to revisit that PHI to make sure we extract out the
        // needed piece.
        if (PHINode *OldInVal = dyn_cast<PHINode>(PN->getIncomingValue(i)))
          if (PHIsInspected.count(OldInVal)) {
            unsigned RefPHIId =
                find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
            PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset,
                                              cast<Instruction>(Res)));
            ++UserE;
          }
      }
      PredValues.clear();

      DEBUG(dbgs() << "  Made element PHI for offset " << Offset << ": "
                   << *EltPHI << '\n');
      ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
    }

    // Replace the use of this piece with the PHI node.
    replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
  }

  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
  // with undefs.
  Value *Undef = UndefValue::get(FirstPhi.getType());
  for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
    replaceInstUsesWith(*PHIsToSlice[i], Undef);
  return replaceInstUsesWith(FirstPhi, Undef);
}

// PHINode simplification
//
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
  if (Value *V = SimplifyInstruction(&PN, DL, &TLI, &DT, &AC))
    return replaceInstUsesWith(PN, V);

  if (Instruction *Result = FoldPHIArgZextsIntoPHI(PN))
    return Result;

  // If all PHI operands are the same operation, pull them through the PHI,
  // reducing code size.
  if (isa<Instruction>(PN.getIncomingValue(0)) &&
      isa<Instruction>(PN.getIncomingValue(1)) &&
      cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
          cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
      // FIXME: The hasOneUse check will fail for PHIs that use the value more
      // than once in themselves (e.g. on multiple incoming edges).
      PN.getIncomingValue(0)->hasOneUse())
    if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
      return Result;

  // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
  // PHI)... break the cycle.
  if (PN.hasOneUse()) {
    Instruction *PHIUser = cast<Instruction>(PN.user_back());
    if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
      SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
      PotentiallyDeadPHIs.insert(&PN);
      if (DeadPHICycle(PU, PotentiallyDeadPHIs))
        return replaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi. This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late in the pipeline.
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->user_back() == &PN) {
      return replaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }
    // When a PHI is used only to be compared with zero, it is safe to replace
    // an incoming value proved as known nonzero with any non-zero constant.
    // For example, in the code below, the incoming value %v can be replaced
    // with any non-zero constant based on the fact that the PHI is only used to
    // be compared with zero and %v is a known non-zero value:
    // %v = select %cond, 1, 2
    // %p = phi [%v, BB] ...
    // %c = icmp eq i32 %p, 0
    auto *CmpInst = dyn_cast<ICmpInst>(PHIUser);
    // FIXME: To be simple, handle only integer type for now.
    if (CmpInst && isa<IntegerType>(PN.getType()) && CmpInst->isEquality() &&
        match(CmpInst->getOperand(1), m_Zero())) {
      ConstantInt *NonZeroConst = nullptr;
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
        Instruction *CtxI = PN.getIncomingBlock(i)->getTerminator();
        Value *VA = PN.getIncomingValue(i);
        if (isKnownNonZero(VA, DL, 0, &AC, CtxI, &DT)) {
          if (!NonZeroConst)
            NonZeroConst = GetAnyNonZeroConstInt(PN);
          PN.setIncomingValue(i, NonZeroConst);
        }
      }
    }
  }

  // We sometimes end up with phi cycles that non-obviously end up being the
  // same value, for example:
  //   z = some value; x = phi (y, z); y = phi (x, z)
  // where the phi nodes don't necessarily need to be in the same block. Do a
  // quick check to see if the PHI node only contains a single non-phi value, if
  // so, scan to see if the phi cycle is actually equal to that value.
  {
    unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
    // Scan for the first non-phi operand.
    while (InValNo != NumIncomingVals &&
           isa<PHINode>(PN.getIncomingValue(InValNo)))
      ++InValNo;

    if (InValNo != NumIncomingVals) {
      Value *NonPhiInVal = PN.getIncomingValue(InValNo);

      // Scan the rest of the operands to see if there are any conflicts, if so
      // there is no need to recursively scan other phis.
      for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
        Value *OpVal = PN.getIncomingValue(InValNo);
        if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
          break;
      }

      // If we scanned over all operands, then we have one unique value plus
      // phi values. Scan PHI nodes to see if they all merge in each other or
      // the value.
      if (InValNo == NumIncomingVals) {
        SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
        if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
          return replaceInstUsesWith(PN, NonPhiInVal);
      }
    }
  }

  // If there are multiple PHIs, sort their operands so that they all list
  // the blocks in the same order. This will help identical PHIs be eliminated
  // by other passes. Other passes shouldn't depend on this for correctness
  // however.
  PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
  if (&PN != FirstPN)
    for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
      BasicBlock *BBA = PN.getIncomingBlock(i);
      BasicBlock *BBB = FirstPN->getIncomingBlock(i);
      if (BBA != BBB) {
        Value *VA = PN.getIncomingValue(i);
        unsigned j = PN.getBasicBlockIndex(BBB);
        Value *VB = PN.getIncomingValue(j);
        PN.setIncomingBlock(i, BBB);
        PN.setIncomingValue(i, VB);
        PN.setIncomingBlock(j, BBA);
        PN.setIncomingValue(j, VA);
        // NOTE: Instcombine normally would want us to "return &PN" if we
        // modified any of the operands of an instruction. However, since we
        // aren't adding or removing uses (just rearranging them) we don't do
        // this in this case.
      }
    }

  // If this is an integer PHI and we know that it has an illegal type, see if
  // it is only used by trunc or trunc(lshr) operations. If so, we split the
  // PHI into the various pieces being extracted. This sort of thing is
  // introduced when SROA promotes an aggregate to a single large integer type.
  if (PN.getType()->isIntegerTy() &&
      !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
    if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
      return Res;

  return nullptr;
}