//===- InstCombineCasts.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Analyze 'Val', seeing if it is a simple linear expression.
/// If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
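///
/// For example (illustrative, assuming the arithmetic carries no-wrap flags):
/// 'mul i32 %X, 4' decomposes to %X with Scale = 4 and Offset = 0, and
/// 'add (mul i32 %X, 4), 8' decomposes to %X with Scale = 4 and Offset = 8.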
static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        uint64_t &Offset) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    Offset = CI->getZExtValue();
    Scale  = 0;
    return ConstantInt::get(Val->getType(), 0);
  }

  if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
    // Cannot look past anything that might overflow.
    OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
    if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) {
      Scale = 1;
      Offset = 0;
      return Val;
    }

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (I->getOpcode() == Instruction::Shl) {
        // This is a value scaled by '1 << the shift amt'.
        Scale = UINT64_C(1) << RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }
      if (I->getOpcode() == Instruction::Mul) {
        // This value is scaled by 'RHS'.
        Scale = RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }
      if (I->getOpcode() == Instruction::Add) {
        // We have X+C.  Check to see if we really have (X*C2)+C1,
        // where C1 is divisible by C2.
        unsigned SubScale;
        Value *SubVal =
          decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
        Offset += RHS->getZExtValue();
        Scale = SubScale;
        return SubVal;
      }
    }
  }

  // Otherwise, we can't look past this.
  Scale = 1;
  Offset = 0;
  return Val;
}

/// If we find a cast of an allocation instruction, try to eliminate the cast by
/// moving the type information into the alloc.
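///
/// For example (an illustrative case): for
///   %buf = alloca i8, i32 32
///   %cast = bitcast i8* %buf to i32*
/// the allocation can be rewritten as 'alloca i32, i32 8', making the
/// bitcast unnecessary.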
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
                                                   AllocaInst &AI) {
  PointerType *PTy = cast<PointerType>(CI.getType());

  BuilderTy AllocaBuilder(Builder);
  AllocaBuilder.SetInsertPoint(&AI);

  // Get the type really allocated and the type casted to.
  Type *AllocElTy = AI.getAllocatedType();
  Type *CastElTy = PTy->getElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;

  unsigned AllocElTyAlign = DL.getABITypeAlignment(AllocElTy);
  unsigned CastElTyAlign = DL.getABITypeAlignment(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return nullptr;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation.  If we keep it the
  // same, we open the door to infinite loops of various kinds.
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;

  uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy);
  uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy);
  if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;

  // If the allocation has multiple uses, only promote it if we're not
  // shrinking the amount of memory being allocated.
  uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy);
  uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy);
  if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  uint64_t ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
    decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus, by using a non-1 scale, we really can
  // do the transformation.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return nullptr;

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = nullptr;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
    // Insert before the alloca, not before the cast.
    Amt = AllocaBuilder.CreateMul(Amt, NumElements);
  }

  if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                  Offset, true);
    Amt = AllocaBuilder.CreateAdd(Amt, Off);
  }

  AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
  New->setAlignment(MaybeAlign(AI.getAlignment()));
  New->takeName(&AI);
  New->setUsedWithInAlloca(AI.isUsedWithInAlloca());

  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast.  This will also hack on CI, but it
  // will die soon.
  if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
    replaceInstUsesWith(AI, NewCast);
  }
  return replaceInstUsesWith(CI, New);
}

/// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
/// true for, actually insert the code to evaluate the expression.
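///
/// For instance (an illustrative case), evaluating
///   add i64 (zext i32 %x to i64), (zext i32 %y to i64)
/// in type i32 returns both zext operands directly and rebuilds the add as
/// 'add i32 %x, %y'.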
Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
                                             bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
    // If we got a constantexpr back, try to simplify it with DL info.
    if (Constant *FoldedC = ConstantFoldConstant(C, DL, &TLI))
      C = FoldedC;
    return C;
  }

  // Otherwise, it must be an instruction.
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = nullptr;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source.  There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    // This also handles the case of zext(trunc(x)) -> zext(x).
    Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
                                      Opc == Instruction::SExt);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =
          EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }

  Res->takeName(I);
  return InsertNewInstWith(Res, *I);
}

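/// Determine how, if at all, a pair of casts can be folded together. For
/// example (illustrative), 'zext i16 %x to i32' followed by 'zext i32 to i64'
/// folds to the single cast 'zext i16 %x to i64'.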
Instruction::CastOps InstCombiner::isEliminableCastPair(const CastInst *CI1,
                                                        const CastInst *CI2) {
  Type *SrcTy = CI1->getSrcTy();
  Type *MidTy = CI1->getDestTy();
  Type *DstTy = CI2->getDestTy();

  Instruction::CastOps firstOp = CI1->getOpcode();
  Instruction::CastOps secondOp = CI2->getOpcode();
  Type *SrcIntPtrTy =
      SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
  Type *MidIntPtrTy =
      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
  Type *DstIntPtrTy =
      DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
                                                DstIntPtrTy);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
      (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
    Res = 0;

  return Instruction::CastOps(Res);
}

/// Implement the transforms common to all CastInst visitors.
Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  // Try to eliminate a cast of a cast.
  if (auto *CSrc = dyn_cast<CastInst>(Src)) {   // A->B->C cast
    if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      auto *Ty = CI.getType();
      auto *Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty);
      // Point debug users of the dying cast to the new one.
      if (CSrc->hasOneUse())
        replaceAllDbgUsesWith(*CSrc, *Res, CI, DT);
      return Res;
    }
  }

  if (auto *Sel = dyn_cast<SelectInst>(Src)) {
    // We are casting a select. Try to fold the cast into the select, but only
    // if the select does not have a compare instruction with matching operand
    // types. Creating a select with operands that are different sizes than its
    // condition may inhibit other folds and lead to worse codegen.
    auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
    if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType())
      if (Instruction *NV = FoldOpIntoSelect(CI, Sel)) {
        replaceAllDbgUsesWith(*Sel, *NV, CI, DT);
        return NV;
      }
  }

  // If we are casting a PHI, then fold the cast into the PHI.
  if (auto *PN = dyn_cast<PHINode>(Src)) {
    // Don't do this if it would create a PHI node with an illegal type from a
    // legal type.
    if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
        shouldChangeType(CI.getType(), Src->getType()))
      if (Instruction *NV = foldOpIntoPhi(CI, PN))
        return NV;
  }

  return nullptr;
}

/// Constants and extensions/truncates from the destination type are always
/// free to be evaluated in that type. This is a helper for canEvaluate*.
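///
/// For example (illustrative), when evaluating in i8: the constant i32 42 is
/// free (it simply truncates), and so is 'zext i8 %x to i32' because %x
/// already has the destination type.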
static bool canAlwaysEvaluateInType(Value *V, Type *Ty) {
  if (isa<Constant>(V))
    return true;

  Value *X;
  if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
      X->getType() == Ty)
    return true;

  return false;
}

/// Filter out values that we cannot evaluate in the destination type for free.
/// This is a helper for canEvaluate*.
static bool canNotEvaluateInType(Value *V, Type *Ty) {
  assert(!isa<Constant>(V) && "Constant should already be handled.");
  if (!isa<Instruction>(V))
    return true;
  // We don't extend or shrink something that has multiple uses -- doing so
  // would require duplicating the instruction which isn't profitable.
  if (!V->hasOneUse())
    return true;

  return false;
}

/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than V.  We should return true if trunc(V)
/// can be computed by computing V in the smaller type.  If V is an instruction,
/// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
/// makes sense if x and y can be efficiently truncated.
///
/// This function works on both vectors and scalars.
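///
/// For example (illustrative), 'trunc (lshr i64 %x, 8) to i32' can be computed
/// as 'lshr i32 (trunc %x), 8', but only when the bits that would be shifted
/// into the narrow result are already known to be zero.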
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombiner &IC,
                                 Instruction *CxtI) {
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  Type *OrigTy = V->getType();
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    assert(BitWidth < OrigBitWidth && "Unexpected bitwidths!");
    APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
        IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Shl: {
    // If we are truncating the result of this SHL, and if it's a shift of a
    // constant amount, we can always perform a SHL in a smaller type.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (Amt->getLimitedValue(BitWidth) < BitWidth)
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::LShr: {
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (Amt->getLimitedValue(BitWidth) < BitWidth &&
          IC.MaskedValueIsZero(I->getOperand(0),
            APInt::getBitsSetFrom(OrigBitWidth, BitWidth), 0, CxtI)) {
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
      }
    }
    break;
  }
  case Instruction::AShr: {
    // If this is a truncate of an arithmetic shr, we can truncate it to a
    // smaller ashr iff we know that all the bits from the sign bit of the
    // original type and the sign bit of the truncate type are similar.
    // TODO: It is enough to check that the bits we would be shifting in are
    //       similar to sign bit of the truncate type.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (Amt->getLimitedValue(BitWidth) < BitWidth &&
          OrigBitWidth - BitWidth <
              IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

/// Given a vector that is bitcast to an integer, optionally logically
/// right-shifted, and truncated, convert it to an extractelement.
/// Example (big endian):
///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
///   --->
///   extractelement <4 x i32> %X, 1
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc, InstCombiner &IC) {
  Value *TruncOp = Trunc.getOperand(0);
  Type *DestType = Trunc.getType();
  if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
    return nullptr;

  Value *VecInput = nullptr;
  ConstantInt *ShiftVal = nullptr;
  if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
                                  m_LShr(m_BitCast(m_Value(VecInput)),
                                         m_ConstantInt(ShiftVal)))) ||
      !isa<VectorType>(VecInput->getType()))
    return nullptr;

  VectorType *VecType = cast<VectorType>(VecInput->getType());
  unsigned VecWidth = VecType->getPrimitiveSizeInBits();
  unsigned DestWidth = DestType->getPrimitiveSizeInBits();
  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;

  if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
    return nullptr;

  // If the element type of the vector doesn't match the result type,
  // bitcast it to a vector type that we can extract from.
  unsigned NumVecElts = VecWidth / DestWidth;
  if (VecType->getElementType() != DestType) {
    VecType = VectorType::get(DestType, NumVecElts);
    VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
  }

  unsigned Elt = ShiftAmount / DestWidth;
  if (IC.getDataLayout().isBigEndian())
    Elt = NumVecElts - 1 - Elt;

  return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
}

/// Rotate left/right may occur in a wider type than necessary because of type
/// promotion rules. Try to narrow the inputs and convert to funnel shift.
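///
/// An illustrative pattern (assuming an i8 rotate promoted to i32):
///   %z = zext i8 %x to i32
///   %or = or i32 (shl i32 %z, %amt), (lshr i32 %z, (sub i32 8, %amt))
///   %r = trunc i32 %or to i8
/// can become: %r = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 trunc(%amt))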
Instruction *InstCombiner::narrowRotate(TruncInst &Trunc) {
  assert((isa<VectorType>(Trunc.getSrcTy()) ||
          shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
         "Don't narrow to an illegal scalar type");

  // Bail out on strange types. It is possible to handle some of these patterns
  // even with non-power-of-2 sizes, but it is not a likely scenario.
  Type *DestTy = Trunc.getType();
  unsigned NarrowWidth = DestTy->getScalarSizeInBits();
  if (!isPowerOf2_32(NarrowWidth))
    return nullptr;

  // First, find an or'd pair of opposite shifts with the same shifted operand:
  // trunc (or (lshr ShVal, ShAmt0), (shl ShVal, ShAmt1))
  Value *Or0, *Or1;
  if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_Value(Or0), m_Value(Or1)))))
    return nullptr;

  Value *ShVal, *ShAmt0, *ShAmt1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal), m_Value(ShAmt0)))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Specific(ShVal), m_Value(ShAmt1)))))
    return nullptr;

  auto ShiftOpcode0 = cast<BinaryOperator>(Or0)->getOpcode();
  auto ShiftOpcode1 = cast<BinaryOperator>(Or1)->getOpcode();
  if (ShiftOpcode0 == ShiftOpcode1)
    return nullptr;

  // Match the shift amount operands for a rotate pattern. This always matches
  // a subtraction on the R operand.
  auto matchShiftAmount = [](Value *L, Value *R, unsigned Width) -> Value * {
    // The shift amounts may add up to the narrow bit width:
    // (shl ShVal, L) | (lshr ShVal, Width - L)
    if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L)))))
      return L;

    // The shift amount may be masked with negation:
    // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
    Value *X;
    unsigned Mask = Width - 1;
    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
      return X;

    // Same as above, but the shift amount may be extended after masking:
    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
      return X;

    return nullptr;
  };

  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
  bool SubIsOnLHS = false;
  if (!ShAmt) {
    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
    SubIsOnLHS = true;
  }
  if (!ShAmt)
    return nullptr;

  // The shifted value must have high zeros in the wide type. Typically, this
  // will be a zext, but it could also be the result of an 'and' or 'shift'.
  unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
  APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
  if (!MaskedValueIsZero(ShVal, HiBitMask, 0, &Trunc))
    return nullptr;

  // We have an unnecessarily wide rotate!
  // trunc (or (lshr ShVal, ShAmt), (shl ShVal, BitWidth - ShAmt))
  // Narrow the inputs and convert to funnel shift intrinsic:
  // llvm.fshl.i8(trunc(ShVal), trunc(ShVal), trunc(ShAmt))
  Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
  Value *X = Builder.CreateTrunc(ShVal, DestTy);
  bool IsFshl = (!SubIsOnLHS && ShiftOpcode0 == BinaryOperator::Shl) ||
                (SubIsOnLHS && ShiftOpcode1 == BinaryOperator::Shl);
  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
  return IntrinsicInst::Create(F, { X, X, NarrowShAmt });
}

/// Try to narrow the width of math or bitwise logic instructions by pulling a
/// truncate ahead of binary operators.
/// TODO: Transforms for truncated shifts should be moved into here.
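///
/// For example (illustrative):
///   trunc (add i32 (zext i8 %x to i32), %y) to i8 --> add i8 %x, (trunc %y)
/// The extension of %x is bypassed entirely and only %y pays for a truncate.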
Instruction *InstCombiner::narrowBinOp(TruncInst &Trunc) {
  Type *SrcTy = Trunc.getSrcTy();
  Type *DestTy = Trunc.getType();
  if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
    return nullptr;

  BinaryOperator *BinOp;
  if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
    return nullptr;

  Value *BinOp0 = BinOp->getOperand(0);
  Value *BinOp1 = BinOp->getOperand(1);
  switch (BinOp->getOpcode()) {
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul: {
    Constant *C;
    if (match(BinOp0, m_Constant(C))) {
      // trunc (binop C, X) --> binop C', (trunc X)
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
    }
    if (match(BinOp1, m_Constant(C))) {
      // trunc (binop X, C) --> binop (trunc X), C'
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
    }
    Value *X;
    if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop (ext X), Y) --> binop X, (trunc Y)
      Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
    }
    if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop Y, (ext X)) --> binop (trunc Y), X
      Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
    }
    break;
  }

  default: break;
  }

  if (Instruction *NarrowOr = narrowRotate(Trunc))
    return NarrowOr;

  return nullptr;
}

/// Try to narrow the width of a splat shuffle. This could be generalized to any
/// shuffle with a constant operand, but we limit the transform to avoid
/// creating a shuffle type that targets may not be able to lower effectively.
static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
                                       InstCombiner::BuilderTy &Builder) {
  auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
  if (Shuf && Shuf->hasOneUse() && isa<UndefValue>(Shuf->getOperand(1)) &&
      Shuf->getMask()->getSplatValue() &&
      Shuf->getType() == Shuf->getOperand(0)->getType()) {
    // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Undef, SplatMask
    Constant *NarrowUndef = UndefValue::get(Trunc.getType());
    Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
    return new ShuffleVectorInst(NarrowOp, NarrowUndef, Shuf->getMask());
  }

  return nullptr;
}

/// Try to narrow the width of an insert element. This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths. This
/// could also be extended to handle the case of inserting a scalar constant
/// into a vector variable.
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (isa<UndefValue>(VecOp)) {
    // trunc   (inselt undef, X, Index) --> inselt undef,   (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}

Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  Type *DestTy = CI.getType(), *SrcTy = Src->getType();

  // Attempt to truncate the entire input expression tree to the destination
  // type.  Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &CI)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid cast: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(CI, Res);
  }

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *SI = dyn_cast<SelectInst>(CI.getOperand(0)))
    if (matchSelectPattern(SI, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(CI))
    return &CI;

  if (DestTy->getScalarSizeInBits() == 1) {
    Value *Zero = Constant::getNullValue(Src->getType());
    if (DestTy->isIntegerTy()) {
      // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only).
      // TODO: We canonicalize to more instructions here because we are probably
      // lacking equivalent analysis for trunc relative to icmp. There may also
      // be codegen concerns. If those trunc limitations were removed, we could
      // remove this transform.
      Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }

    // For vectors, we do not canonicalize all truncs to icmp, so optimize
    // patterns that would be covered within visitICmpInst.
    Value *X;
    const APInt *C;
    if (match(Src, m_OneUse(m_LShr(m_Value(X), m_APInt(C))))) {
      // trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0
      APInt MaskC = APInt(SrcTy->getScalarSizeInBits(), 1).shl(*C);
      Value *And = Builder.CreateAnd(X, ConstantInt::get(SrcTy, MaskC));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
    if (match(Src, m_OneUse(m_c_Or(m_LShr(m_Value(X), m_APInt(C)),
                                   m_Deferred(X))))) {
      // trunc (or (lshr X, C), X) to i1 --> icmp ne (and X, C'), 0
      APInt MaskC = APInt(SrcTy->getScalarSizeInBits(), 1).shl(*C) | 1;
      Value *And = Builder.CreateAnd(X, ConstantInt::get(SrcTy, MaskC));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
  }

  // FIXME: Maybe combine the next two transforms to handle the no cast case
  // more efficiently. Support vector types. Cleanup code by using m_OneUse.

  // Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
  Value *A = nullptr; ConstantInt *Cst = nullptr;
  if (Src->hasOneUse() &&
      match(Src, m_LShr(m_ZExt(m_Value(A)), m_ConstantInt(Cst)))) {
    // We have three types to worry about here, the type of A, the source of
    // the truncate (MidSize), and the destination of the truncate. We know that
    // ASize < MidSize and MidSize > ResultSize, but don't know the relation
    // between ASize and ResultSize.
    unsigned ASize = A->getType()->getPrimitiveSizeInBits();

    // If the shift amount is larger than the size of A, then the result is
    // known to be zero because all the input bits got shifted out.
    if (Cst->getZExtValue() >= ASize)
      return replaceInstUsesWith(CI, Constant::getNullValue(DestTy));

    // Since we're doing an lshr and a zero extend, and know that the shift
    // amount is smaller than ASize, it is always safe to do the shift in A's
    // type, then zero extend or truncate to the result.
    Value *Shift = Builder.CreateLShr(A, Cst->getZExtValue());
    Shift->takeName(Src);
    return CastInst::CreateIntegerCast(Shift, DestTy, false);
  }

  // FIXME: We should canonicalize to zext/trunc and remove this transform.
  // Transform trunc(lshr (sext A), Cst) to ashr A, Cst to eliminate type
  // conversion.
  // It works because bits coming from sign extension have the same value as
  // the sign bit of the original value; performing ashr instead of lshr
  // generates bits of the same value as the sign bit.
  if (Src->hasOneUse() &&
      match(Src, m_LShr(m_SExt(m_Value(A)), m_ConstantInt(Cst)))) {
    Value *SExt = cast<Instruction>(Src)->getOperand(0);
    const unsigned SExtSize = SExt->getType()->getPrimitiveSizeInBits();
    const unsigned ASize = A->getType()->getPrimitiveSizeInBits();
    const unsigned CISize = CI.getType()->getPrimitiveSizeInBits();
    const unsigned MaxAmt = SExtSize - std::max(CISize, ASize);
    unsigned ShiftAmt = Cst->getZExtValue();

    // This optimization can be only performed when zero bits generated by
    // the original lshr aren't pulled into the value after truncation, so we
    // can only shift by values no larger than the number of extension bits.
    // FIXME: Instead of bailing when the shift is too large, use and to clear
    // the extra bits.
    if (ShiftAmt <= MaxAmt) {
      if (CISize == ASize)
        return BinaryOperator::CreateAShr(A, ConstantInt::get(CI.getType(),
                                          std::min(ShiftAmt, ASize - 1)));
      if (SExt->hasOneUse()) {
        Value *Shift = Builder.CreateAShr(A, std::min(ShiftAmt, ASize - 1));
        Shift->takeName(Src);
        return CastInst::CreateIntegerCast(Shift, CI.getType(), true);
      }
    }
  }

  if (Instruction *I = narrowBinOp(CI))
    return I;

  if (Instruction *I = shrinkSplatShuffle(CI, Builder))
    return I;

  if (Instruction *I = shrinkInsertElt(CI, Builder))
    return I;

  if (Src->hasOneUse() && isa<IntegerType>(SrcTy) &&
      shouldChangeType(SrcTy, DestTy)) {
    // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
    // dest type is native and cst < dest size.
    if (match(Src, m_Shl(m_Value(A), m_ConstantInt(Cst))) &&
        !match(A, m_Shr(m_Value(), m_Constant()))) {
      // Skip shifts of shift by constants. It undoes a combine in
      // FoldShiftByConstant and is the extend in reg pattern.
      const unsigned DestSize = DestTy->getScalarSizeInBits();
      if (Cst->getValue().ult(DestSize)) {
        Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");

        return BinaryOperator::Create(
          Instruction::Shl, NewTrunc,
          ConstantInt::get(DestTy, Cst->getValue().trunc(DestSize)));
      }
    }
  }

  if (Instruction *I = foldVecTruncToExtElt(CI, *this))
    return I;

  return nullptr;
}

Instruction *InstCombiner::transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext,
                                             bool DoTransform) {
  // If we are just checking for an icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.
  const APInt *Op1CV;
  if (match(Cmp->getOperand(1), m_APInt(Op1CV))) {

    // zext (x <s  0) to i32 --> x>>u31      true if signbit set.
    // zext (x >s -1) to i32 --> (x>>u31)^1  true if signbit clear.
    if ((Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isNullValue()) ||
        (Cmp->getPredicate() == ICmpInst::ICMP_SGT && Op1CV->isAllOnesValue())) {
      if (!DoTransform) return Cmp;

      Value *In = Cmp->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != Zext.getType())
        In = Builder.CreateIntCast(In, Zext.getType(), false /*ZExt*/);

      if (Cmp->getPredicate() == ICmpInst::ICMP_SGT) {
        Constant *One = ConstantInt::get(In->getType(), 1);
        In = Builder.CreateXor(In, One, In->getName() + ".not");
      }

      return replaceInstUsesWith(Zext, In);
    }

    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X        iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1      iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    if ((Op1CV->isNullValue() || Op1CV->isPowerOf2()) &&
        // This only works for EQ and NE
        Cmp->isEquality()) {
      // If Op1C is some other power of two, convert:
      KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
        if (!DoTransform) return Cmp;

        bool isNE = Cmp->getPredicate() == ICmpInst::ICMP_NE;
        if (!Op1CV->isNullValue() && (*Op1CV != KnownZeroMask)) {
          // (X&4) == 2 --> false
          // (X&4) != 2 --> true
          Constant *Res = ConstantInt::get(Zext.getType(), isNE);
          return replaceInstUsesWith(Zext, Res);
        }

        uint32_t ShAmt = KnownZeroMask.logBase2();
        Value *In = Cmp->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                  In->getName() + ".lobit");
        }

        if (!Op1CV->isNullValue() == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder.CreateXor(In, One);
        }

        if (Zext.getType() == In->getType())
          return replaceInstUsesWith(Zext, In);

        Value *IntCast = Builder.CreateIntCast(In, Zext.getType(), false);
        return replaceInstUsesWith(Zext, IntCast);
      }
    }
  }

  // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
  // It is also profitable to transform icmp eq into not(xor(A, B)) because that
  // may lead to additional simplifications.
  if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Zext.getType())) {
      Value *LHS = Cmp->getOperand(0);
      Value *RHS = Cmp->getOperand(1);

      KnownBits KnownLHS = computeKnownBits(LHS, 0, &Zext);
      KnownBits KnownRHS = computeKnownBits(RHS, 0, &Zext);

      if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) {
        APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
        APInt UnknownBit = ~KnownBits;
        if (UnknownBit.countPopulation() == 1) {
          if (!DoTransform) return Cmp;

          Value *Result = Builder.CreateXor(LHS, RHS);

          // Mask off any bits that are set and won't be shifted away.
          if (KnownLHS.One.uge(UnknownBit))
            Result = Builder.CreateAnd(Result,
                                       ConstantInt::get(ITy, UnknownBit));

          // Shift the bit we're testing down to the lsb.
          Result = Builder.CreateLShr(
               Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));

          if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
            Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
          Result->takeName(Cmp);
          return replaceInstUsesWith(Zext, Result);
        }
      }
    }
  }

  return nullptr;
}

/// Determine if the specified value can be computed in the specified wider type
/// and produce the same low bits. If not, return false.
///
/// If this function returns true, it can also return a non-zero number of bits
/// (in BitsToClear) which indicates that the value it computes is correct for
/// the zero extend, but that the additional BitsToClear bits need to be zero'd
/// out.  For example, to promote something like:
///
///   %B = trunc i64 %A to i32
///   %C = lshr i32 %B, 8
///   %E = zext i32 %C to i64
///
/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
/// set to 8 to indicate that the promoted value needs to have bits 24-31
/// cleared in addition to bits 32-63.  Since an 'and' will be generated to
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombiner &IC, Instruction *CxtI) {
  BitsToClear = 0;
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  unsigned Tmp;
  switch (I->getOpcode()) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).
  case Instruction::SExt:  // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 && I->isBitwiseLogicOp()) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI)) {
        // If this is an And instruction and all of the BitsToClear are
        // known to be zero we can reset BitsToClear.
        if (I->getOpcode() == Instruction::And)
          BitsToClear = 0;
        return true;
      }
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl: {
    // We can promote shl(x, cst) if we can promote x.  Since shl overwrites the
    // upper bits we can reduce BitsToClear by the shift amount.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  }
  case Instruction::LShr: {
    // We can promote lshr(x, cst) if we can promote x.  This requires the
    // ultimate 'and' to clear out the high zero bits we're clearing out though.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  }
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear are
        // known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}

Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  // If one of the common conversions will work, do it.
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // Try to extend the entire expression tree to the wide destination type.
  unsigned BitsToClear;
  if (shouldChangeType(SrcTy, DestTy) &&
      canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
    assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
           "Can't clear more bits than in SrcTy");

    // Okay, we can transform this!  Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid zero extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    // Preserve debug values referring to Src if the zext is its last use.
    if (auto *SrcOp = dyn_cast<Instruction>(Src))
      if (SrcOp->hasOneUse())
        replaceAllDbgUsesWith(*SrcOp, *Res, CI, DT);

    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res,
                          APInt::getHighBitsSet(DestBitSize,
                                                DestBitSize-SrcBitsKept),
                          0, &CI))
      return replaceInstUsesWith(CI, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C = ConstantInt::get(Res->getType(),
                               APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
    return BinaryOperator::CreateAnd(Res, C);
  }

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
  if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) {   // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.

    // Get the sizes of the types involved.  We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = CI.getType()->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    // SrcSize <  DstSize: zext(a & mask)
    // SrcSize == DstSize: a & mask
    // SrcSize  > DstSize: trunc(a) & mask
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
      return new ZExtInst(And, CI.getType());
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder.CreateTrunc(A, CI.getType());
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(Cmp, CI);

  BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
  if (SrcI && SrcI->getOpcode() == Instruction::Or) {
    // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) if at least one
    // of the (zext icmp) can be eliminated. If so, immediately perform the
    // according elimination.
    ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
    ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
    if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
        (transformZExtICmp(LHS, CI, false) ||
         transformZExtICmp(RHS, CI, false))) {
      // zext (or icmp, icmp) -> or (zext icmp), (zext icmp)
      Value *LCast = Builder.CreateZExt(LHS, CI.getType(), LHS->getName());
      Value *RCast = Builder.CreateZExt(RHS, CI.getType(), RHS->getName());
      Value *Or = Builder.CreateOr(LCast, RCast, CI.getName());
      if (auto *OrInst = dyn_cast<Instruction>(Or))
        Builder.SetInsertPoint(OrInst);

      // Perform the elimination.
      if (auto *LZExt = dyn_cast<ZExtInst>(LCast))
        transformZExtICmp(LHS, *LZExt);
      if (auto *RZExt = dyn_cast<ZExtInst>(RCast))
        transformZExtICmp(RHS, *RZExt);

      return replaceInstUsesWith(CI, Or);
    }
  }

  // zext(trunc(X) & C) -> (X & zext(C)).
  Constant *C;
  Value *X;
  if (SrcI &&
      match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
      X->getType() == CI.getType())
    return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));

  // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
  Value *And;
  if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
      match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
      X->getType() == CI.getType()) {
    Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
    return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
  }

  return nullptr;
}

/// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
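///
/// For example (illustrative), 'sext (icmp slt i32 %x, 0) to i32' becomes
/// 'ashr i32 %x, 31': all ones when %x is negative, zero otherwise.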
Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
  Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
  ICmpInst::Predicate Pred = ICI->getPredicate();

  // Don't bother if Op1 isn't of vector or integer type.
  if (!Op1->getType()->isIntOrIntVectorTy())
    return nullptr;

  if ((Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) ||
      (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))) {
    // (x <s  0) ? -1 : 0 -> ashr x, 31        -> all ones if negative
    // (x >s -1) ? -1 : 0 -> not (ashr x, 31)  -> all ones if positive
    Value *Sh = ConstantInt::get(Op0->getType(),
                                 Op0->getType()->getScalarSizeInBits() - 1);
    Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
    if (In->getType() != CI.getType())
      In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);

    if (Pred == ICmpInst::ICMP_SGT)
      In = Builder.CreateNot(In, In->getName() + ".not");
    return replaceInstUsesWith(CI, In);
  }

  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    // If we know that only one bit of the LHS of the icmp can be set and we
    // have an equality comparison with zero or a power of 2, we can transform
    // the icmp and sext into bitwise/integer operations.
    if (ICI->hasOneUse() &&
        ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
      KnownBits Known = computeKnownBits(Op0, 0, &CI);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) {
        Value *In = ICI->getOperand(0);

        // If the icmp tests for a known zero bit we can constant fold it.
        if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
          Value *V = Pred == ICmpInst::ICMP_NE ?
                       ConstantInt::getAllOnesValue(CI.getType()) :
                       ConstantInt::getNullValue(CI.getType());
          return replaceInstUsesWith(CI, V);
        }

        if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
          // sext ((x & 2^n) == 0)   -> (x >> n) - 1
          // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
          unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
          // Perform a right shift to place the desired bit in the LSB.
          if (ShiftAmt)
            In = Builder.CreateLShr(In,
                                    ConstantInt::get(In->getType(), ShiftAmt));

          // At this point "In" is either 1 or 0. Subtract 1 to turn
          // {1, 0} -> {0, -1}.
          In = Builder.CreateAdd(In,
                                 ConstantInt::getAllOnesValue(In->getType()),
                                 "sext");
        } else {
          // sext ((x & 2^n) != 0)   -> (x << bitwidth-n) a>> bitwidth-1
          // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
          unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
          // Perform a left shift to place the desired bit in the MSB.
          if (ShiftAmt)
            In = Builder.CreateShl(In,
                                   ConstantInt::get(In->getType(), ShiftAmt));

          // Distribute the bit over the whole bit width.
          In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
                                  KnownZeroMask.getBitWidth() - 1), "sext");
        }

        if (CI.getType() == In->getType())
          return replaceInstUsesWith(CI, In);
        return CastInst::CreateIntegerCast(In, CI.getType(), true/*SExt*/);
      }
    }
  }

  return nullptr;
}

/// Return true if we can take the specified value and return it as type Ty
/// without inserting any new casts and without changing the value of the common
/// low bits. This is used by code that tries to promote integer operations to
/// a wider type, which will allow us to eliminate the extension.
///
/// This function works on both vectors and scalars.
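///
/// For example (illustrative), 'sext (add i16 %x, %y) to i32' can be computed
/// as 'add i32 (sext %x), (sext %y)': both forms agree on the low 16 bits,
/// which is all this predicate promises.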
static bool canEvaluateSExtd(Value *V, Type *Ty) {
  assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
         "Can't sign extend type to a smaller type");
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  switch (I->getOpcode()) {
  case Instruction::SExt:  // sext(sext(x)) -> sext(x)
  case Instruction::ZExt:  // sext(zext(x)) -> zext(x)
  case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // These operators can all arbitrarily be extended if their inputs can.
    return canEvaluateSExtd(I->getOperand(0), Ty) &&
           canEvaluateSExtd(I->getOperand(1), Ty);

  //case Instruction::Shl:   TODO
  //case Instruction::LShr:  TODO

  case Instruction::Select:
    return canEvaluateSExtd(I->getOperand(1), Ty) &&
           canEvaluateSExtd(I->getOperand(2), Ty);

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateSExtd(IncValue, Ty)) return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

Instruction *InstCombiner::visitSExt(SExtInst &CI) {
  // If this sign extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this sext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // If we know that the value being extended is positive, we can use a zext
  // instead.
  KnownBits Known = computeKnownBits(Src, 0, &CI);
  if (Known.isNonNegative())
    return CastInst::Create(Instruction::ZExt, Src, DestTy);

  // Try to extend the entire expression tree to the wide destination type.
  if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
    // Okay, we can transform this!  Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid sign extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, true);
    assert(Res->getType() == DestTy);

    uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with sign bit, just replace this
    // cast with the result.
    if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
      return replaceInstUsesWith(CI, Res);

    // We need to emit a shl + ashr to do the sign extend.
    Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
                                      ShAmt);
  }

  // If the input is a trunc from the destination type, then turn sext(trunc(x))
  // into shifts.
  Value *X;
  if (match(Src, m_OneUse(m_Trunc(m_Value(X)))) && X->getType() == DestTy) {
    // sext(trunc(X)) --> ashr(shl(X, C), C)
    unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
    unsigned DestBitSize = DestTy->getScalarSizeInBits();
    Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
  }

  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformSExtICmp(ICI, CI);

  // If the input is a shl/ashr pair of the same constant, then this is a sign
  // extension from a smaller value.  If we could trust arbitrary bitwidth
  // integers, we could turn this into a truncate to the smaller bit and then
  // use a sext for the whole extension.  Since we don't, look deeper and check
  // for a truncate.  If the source and dest are the same type, eliminate the
  // trunc and extend and just do shifts.  For example, turn:
  //   %a = trunc i32 %i to i8
  //   %b = shl i8 %a, 6
  //   %c = ashr i8 %b, 6
  //   %d = sext i8 %c to i32
  // into:
  //   %a = shl i32 %i, 30
  //   %d = ashr i32 %a, 30
  Value *A = nullptr;
  // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
  ConstantInt *BA = nullptr, *CA = nullptr;
  if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_ConstantInt(BA)),
                        m_ConstantInt(CA))) &&
      BA == CA && A->getType() == CI.getType()) {
    unsigned MidSize = Src->getType()->getScalarSizeInBits();
    unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
    unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
    Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
    A = Builder.CreateShl(A, ShAmtV, CI.getName());
    return BinaryOperator::CreateAShr(A, ShAmtV);
  }

  return nullptr;
}

/// Return true if the specified floating-point constant fits
/// in the specified FP type without changing its value.
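///
/// For example (illustrative): 2.0 converts from double to half and back
/// without loss, so it fits in half, while 1.0e+300 overflows float's range
/// and does not fit in float.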
static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  return !losesInfo;
}

static Type *shrinkFPConstant(ConstantFP *CFP) {
  if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
    return nullptr;  // No constant folding of this.
  // See if the value can be truncated to half and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEhalf()))
    return Type::getHalfTy(CFP->getContext());
  // See if the value can be truncated to float and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEsingle()))
    return Type::getFloatTy(CFP->getContext());
  if (CFP->getType()->isDoubleTy())
    return nullptr;  // Won't shrink.
  if (fitsInFPType(CFP, APFloat::IEEEdouble()))
    return Type::getDoubleTy(CFP->getContext());
  // Don't try to shrink to various long double types.
  return nullptr;
}

// Determine if this is a vector of ConstantFPs and if so, return the minimal
// type we can safely truncate all elements to.
// TODO: Make these support undef elements.
static Type *shrinkFPConstantVector(Value *V) {
  auto *CV = dyn_cast<Constant>(V);
  if (!CV || !CV->getType()->isVectorTy())
    return nullptr;

  Type *MinType = nullptr;

  unsigned NumElts = CV->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
    if (!CFP)
      return nullptr;

    Type *T = shrinkFPConstant(CFP);
    if (!T)
      return nullptr;

    // If we haven't found a type yet or this type has a larger mantissa than
    // our previous type, this is our new minimal type.
    if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth())
      MinType = T;
  }

  // Make a vector type from the minimal type.
  return VectorType::get(MinType, NumElts);
}

/// Find the minimum FP type we can safely truncate to.
static Type *getMinimumFPType(Value *V) {
  if (auto *FPExt = dyn_cast<FPExtInst>(V))
    return FPExt->getOperand(0)->getType();

  // If this value is a constant, return the constant in the smallest FP type
  // that can accurately represent it.  This allows us to turn
  // (float)((double)X+2.0) into x+2.0f.
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    if (Type *T = shrinkFPConstant(CFP))
      return T;

  // Try to shrink a vector of FP constants.
  if (Type *T = shrinkFPConstantVector(V))
    return T;

  return V->getType();
}

1525 Instruction *InstCombiner::visitFPTrunc(FPTruncInst &FPT) {
1526 if (Instruction *I = commonCastTransforms(FPT))
1529 // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
1530 // simplify this expression to avoid one or more of the trunc/extend
1531 // operations if we can do so without changing the numerical results.
1533 // The exact manner in which the widths of the operands interact to limit
1534 // what we can and cannot do safely varies from operation to operation, and
1535 // is explained below in the various case statements.
1536 Type *Ty = FPT.getType();
1537 auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
1538 if (BO && BO->hasOneUse()) {
1539 Type *LHSMinType = getMinimumFPType(BO->getOperand(0));
1540 Type *RHSMinType = getMinimumFPType(BO->getOperand(1));
1541 unsigned OpWidth = BO->getType()->getFPMantissaWidth();
1542 unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
1543 unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
1544 unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
1545 unsigned DstWidth = Ty->getFPMantissaWidth();
1546 switch (BO->getOpcode()) {
1547 default: break;
1548 case Instruction::FAdd:
1549 case Instruction::FSub:
1550 // For addition and subtraction, the infinitely precise result can
1551 // essentially be arbitrarily wide; proving that double rounding
1552 // will not occur because the result of OpI is exact (as we will for
1553 // FMul, for example) is hopeless. However, we *can* nonetheless
1554 // frequently know that double rounding cannot occur (or that it is
1555 // innocuous) by taking advantage of the specific structure of
1556 // infinitely-precise results that admit double rounding.
1558 // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
1559 // to represent both sources, we can guarantee that the double
1560 // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
1561 // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
1562 // for proof of this fact).
1564 // Note: Figueroa does not consider the case where DstFormat !=
1565 // SrcFormat. It's possible (likely even!) that this analysis
1566 // could be tightened for those cases, but they are rare (the main
1567 // case of interest here is (float)((double)float + float)).
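//
// A concrete sketch of the guarded fold below: half has an 11-bit
// mantissa and double a 53-bit one, so 53 >= 2*11+1 holds and
//   %a = fpext half %x to double
//   %b = fpext half %y to double
//   %s = fadd double %a, %b
//   %r = fptrunc double %s to half
// can be rewritten as a single 'fadd half %x, %y'.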
1568 if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
1569 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1570 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1571 Instruction *RI = BinaryOperator::Create(BO->getOpcode(), LHS, RHS);
1572 RI->copyFastMathFlags(BO);
1573 return RI;
1574 }
1575 break;
1576 case Instruction::FMul:
1577 // For multiplication, the infinitely precise result has at most
1578 // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
1579 // that such a value can be exactly represented, then no double
1580 // rounding can possibly occur; we can safely perform the operation
1581 // in the destination format if it can represent both sources.
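// e.g. float sources widened to double satisfy this: double's 53-bit
// mantissa covers 24 + 24 significant bits, so
// (float)((double)%a * (double)%b) --> fmul float %a, %b.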
1582 if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
1583 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1584 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1585 return BinaryOperator::CreateFMulFMF(LHS, RHS, BO);
1586 }
1587 break;
1588 case Instruction::FDiv:
1589 // For division, we again use the bound from Figueroa's
1590 // dissertation. I am entirely certain that this bound can be
1591 // tightened in the unbalanced operand case by an analysis based on
1592 // the Diophantine rational approximation bound, but the well-known
1593 // condition used here is a good conservative first pass.
1594 // TODO: Tighten bound via rigorous analysis of the unbalanced case.
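// e.g. double's 53-bit mantissa satisfies 53 >= 2*24, so
// (float)((double)%a / (double)%b) --> fdiv float %a, %b.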
1595 if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
1596 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1597 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1598 return BinaryOperator::CreateFDivFMF(LHS, RHS, BO);
1599 }
1600 break;
1601 case Instruction::FRem: {
1602 // Remainder is straightforward: it is always exact, so the
1603 // type of OpI doesn't enter into things at all. We simply evaluate
1604 // in whichever source type is larger, then convert to the
1605 // destination type.
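// e.g. for fptrunc (frem (fpext half %x to double), (fpext float %y to
// double)) to half, the larger source type is float (a sketch), so the
// frem is evaluated in float and the result converted to half.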
1606 if (SrcWidth == OpWidth)
1607 break;
1608 Value *LHS, *RHS;
1609 if (LHSWidth == SrcWidth) {
1610 LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType);
1611 RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType);
1612 } else {
1613 LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType);
1614 RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType);
1615 }
1617 Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO);
1618 return CastInst::CreateFPCast(ExactResult, Ty);
1619 }
1620 }
1621 }
1623 // (fptrunc (fneg x)) -> (fneg (fptrunc x))
1624 Value *X;
1625 Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0));
1626 if (Op && Op->hasOneUse()) {
1627 // FIXME: The FMF should propagate from the fptrunc, not the source op.
1628 IRBuilder<>::FastMathFlagGuard FMFG(Builder);
1629 if (isa<FPMathOperator>(Op))
1630 Builder.setFastMathFlags(Op->getFastMathFlags());
1632 if (match(Op, m_FNeg(m_Value(X)))) {
1633 Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty);
1635 // FIXME: Once we're sure that unary FNeg optimizations are on par with
1636 // binary FNeg, this should always return a unary operator.
1637 if (isa<BinaryOperator>(Op))
1638 return BinaryOperator::CreateFNegFMF(InnerTrunc, Op);
1639 return UnaryOperator::CreateFNegFMF(InnerTrunc, Op);
1640 }
1642 // If we are truncating a select that has an extended operand, we can
1643 // narrow the other operand and do the select as a narrow op.
1644 Value *Cond, *X, *Y;
1645 if (match(Op, m_Select(m_Value(Cond), m_FPExt(m_Value(X)), m_Value(Y))) &&
1646 X->getType() == Ty) {
1647 // fptrunc (select Cond, (fpext X), Y) --> select Cond, X, (fptrunc Y)
1648 Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
1649 Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op);
1650 return replaceInstUsesWith(FPT, Sel);
1651 }
1652 if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) &&
1653 X->getType() == Ty) {
1654 // fptrunc (select Cond, Y, (fpext X)) --> select Cond, (fptrunc Y), X
1655 Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
1656 Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op);
1657 return replaceInstUsesWith(FPT, Sel);
1658 }
1659 }
1661 if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
1662 switch (II->getIntrinsicID()) {
1663 default: break;
1664 case Intrinsic::ceil:
1665 case Intrinsic::fabs:
1666 case Intrinsic::floor:
1667 case Intrinsic::nearbyint:
1668 case Intrinsic::rint:
1669 case Intrinsic::round:
1670 case Intrinsic::trunc: {
1671 Value *Src = II->getArgOperand(0);
1672 if (!Src->hasOneUse())
1673 break;
1675 // Except for fabs, this transformation requires the input of the unary FP
1676 // operation to be itself an fpext from the type to which we're
1677 // truncating.
1678 if (II->getIntrinsicID() != Intrinsic::fabs) {
1679 FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
1680 if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
1681 break;
1682 }
1684 // Do unary FP operation on smaller type.
1685 // (fptrunc (fabs x)) -> (fabs (fptrunc x))
1686 Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
1687 Function *Overload = Intrinsic::getDeclaration(FPT.getModule(),
1688 II->getIntrinsicID(), Ty);
1689 SmallVector<OperandBundleDef, 1> OpBundles;
1690 II->getOperandBundlesAsDefs(OpBundles);
1691 CallInst *NewCI =
1692 CallInst::Create(Overload, {InnerTrunc}, OpBundles, II->getName());
1693 NewCI->copyFastMathFlags(II);
1694 return NewCI;
1695 }
1696 }
1697 }
1699 if (Instruction *I = shrinkInsertElt(FPT, Builder))
1700 return I;
1702 return nullptr;
1703 }
1705 Instruction *InstCombiner::visitFPExt(CastInst &CI) {
1706 return commonCastTransforms(CI);
1707 }
1709 // fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
1710 // This is safe if the intermediate type has enough bits in its mantissa to
1711 // accurately represent all values of X. For example, this won't work with
1712 // i64 -> float -> i64.
1713 Instruction *InstCombiner::FoldItoFPtoI(Instruction &FI) {
1714 if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
1715 return nullptr;
1716 Instruction *OpI = cast<Instruction>(FI.getOperand(0));
1718 Value *SrcI = OpI->getOperand(0);
1719 Type *FITy = FI.getType();
1720 Type *OpITy = OpI->getType();
1721 Type *SrcTy = SrcI->getType();
1722 bool IsInputSigned = isa<SIToFPInst>(OpI);
1723 bool IsOutputSigned = isa<FPToSIInst>(FI);
1725 // We can safely assume the conversion won't overflow the output range,
1726 // because (for example) (uint8_t)18293.f is undefined behavior.
1728 // Since we can assume the conversion won't overflow, our decision as to
1729 // whether the input will fit in the float should depend on the minimum
1730 // of the input range and output range.
1732 // This means this is also safe for a signed input and unsigned output, since
1733 // a negative input would lead to undefined behavior.
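//
// For example (a sketch): fptosi (sitofp i16 %x to float) to i32 becomes
// sext i16 %x to i32, since min(15, 31) value bits fit in float's 24-bit
// mantissa.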
1734 int InputSize = (int)SrcTy->getScalarSizeInBits() - IsInputSigned;
1735 int OutputSize = (int)FITy->getScalarSizeInBits() - IsOutputSigned;
1736 int ActualSize = std::min(InputSize, OutputSize);
1738 if (ActualSize <= OpITy->getFPMantissaWidth()) {
1739 if (FITy->getScalarSizeInBits() > SrcTy->getScalarSizeInBits()) {
1740 if (IsInputSigned && IsOutputSigned)
1741 return new SExtInst(SrcI, FITy);
1742 return new ZExtInst(SrcI, FITy);
1743 }
1744 if (FITy->getScalarSizeInBits() < SrcTy->getScalarSizeInBits())
1745 return new TruncInst(SrcI, FITy);
1746 if (SrcTy == FITy)
1747 return replaceInstUsesWith(FI, SrcI);
1748 return new BitCastInst(SrcI, FITy);
1749 }
1751 return nullptr;
1752 }
1753 Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
1754 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
1755 if (!OpI)
1756 return commonCastTransforms(FI);
1758 if (Instruction *I = FoldItoFPtoI(FI))
1759 return I;
1761 return commonCastTransforms(FI);
1762 }
1764 Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
1765 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
1766 if (!OpI)
1767 return commonCastTransforms(FI);
1769 if (Instruction *I = FoldItoFPtoI(FI))
1770 return I;
1772 return commonCastTransforms(FI);
1773 }
1775 Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
1776 return commonCastTransforms(CI);
1777 }
1779 Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
1780 return commonCastTransforms(CI);
1781 }
1783 Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
1784 // If the source integer type is not the intptr_t type for this target, do a
1785 // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
1786 // cast to be exposed to other transforms.
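//
// e.g. assuming 64-bit pointers in the target's data layout,
// 'inttoptr i32 %x to i8*' becomes 'zext i32 %x to i64' followed by an
// inttoptr of the zext.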
1787 unsigned AS = CI.getAddressSpace();
1788 if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
1789 DL.getPointerSizeInBits(AS)) {
1790 Type *Ty = DL.getIntPtrType(CI.getContext(), AS);
1791 if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
1792 Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());
1794 Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
1795 return new IntToPtrInst(P, CI.getType());
1796 }
1798 if (Instruction *I = commonCastTransforms(CI))
1799 return I;
1801 return nullptr;
1802 }
1804 /// Implement the transforms for cast of pointer (bitcast/ptrtoint)
1805 Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
1806 Value *Src = CI.getOperand(0);
1808 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
1809 // If we are casting the result of a getelementptr instruction with no
1810 // offset, turn this into a cast of the original pointer!
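// e.g. ptrtoint (getelementptr i32, i32* %p, i64 0) to i64
//      --> ptrtoint i32* %p to i64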
1811 if (GEP->hasAllZeroIndices() &&
1812 // If CI is an addrspacecast and GEP changes the pointer type, merging
1813 // GEP into CI would undo canonicalizing addrspacecast with different
1814 // pointer types, causing infinite loops.
1815 (!isa<AddrSpaceCastInst>(CI) ||
1816 GEP->getType() == GEP->getPointerOperandType())) {
1817 // Changing the cast operand is usually not a good idea but it is safe
1818 // here because the pointer operand is being replaced with another
1819 // pointer operand so the opcode doesn't need to change.
1820 Worklist.Add(GEP);
1821 CI.setOperand(0, GEP->getOperand(0));
1822 return &CI;
1823 }
1824 }
1826 return commonCastTransforms(CI);
1829 Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
1830 // If the destination integer type is not the intptr_t type for this target,
1831 // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
1832 // to be exposed to other transforms.
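//
// e.g. assuming 64-bit pointers, 'ptrtoint i8* %p to i32' becomes
// 'ptrtoint i8* %p to i64' followed by 'trunc i64 to i32'.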
1834 Type *Ty = CI.getType();
1835 unsigned AS = CI.getPointerAddressSpace();
1837 if (Ty->getScalarSizeInBits() == DL.getPointerSizeInBits(AS))
1838 return commonPointerCastTransforms(CI);
1840 Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);
1841 if (Ty->isVectorTy()) // Handle vectors of pointers.
1842 PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());
1844 Value *P = Builder.CreatePtrToInt(CI.getOperand(0), PtrTy);
1845 return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
1846 }
1848 /// This input value (which is known to have vector type) is being zero extended
1849 /// or truncated to the specified vector type. Since the zext/trunc is done
1850 /// using an integer type, we have a (bitcast(cast(bitcast))) pattern,
1851 /// endianness will impact which end of the vector is extended or
1852 /// truncated.
1854 /// A vector is always stored with index 0 at the lowest address, which
1855 /// corresponds to the most significant bits for a big endian stored integer and
1856 /// the least significant bits for little endian. A trunc/zext of an integer
1857 /// impacts the big end of the integer. Thus, we need to add/remove elements at
1858 /// the front of the vector for big endian targets, and the back of the vector
1859 /// for little endian targets.
1861 /// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
1863 /// The source and destination vector types may have different element types.
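///
/// For example, on a little-endian target (a sketch):
///   %i = bitcast <4 x i8> %v to i32
///   %t = trunc i32 %i to i16
///   %r = bitcast i16 %t to <2 x i8>
/// keeps the low half of %i, i.e. the front two elements of %v, so it becomes
/// a shufflevector selecting elements <0, 1>.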
1864 static Instruction *optimizeVectorResizeWithIntegerBitCasts(Value *InVal,
1865 VectorType *DestTy,
1866 InstCombiner &IC) {
1867 // We can only do this optimization if the output is a multiple of the input
1868 // element size, or the input is a multiple of the output element size.
1869 // Convert the input type to have the same element type as the output.
1870 VectorType *SrcTy = cast<VectorType>(InVal->getType());
1872 if (SrcTy->getElementType() != DestTy->getElementType()) {
1873 // The input types don't need to be identical, but for now they must be the
1874 // same size. There is no specific reason we couldn't handle things like
1875 // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
1876 // around to supporting those cases.
1877 if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
1878 DestTy->getElementType()->getPrimitiveSizeInBits())
1879 return nullptr;
1881 SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
1882 InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
1885 bool IsBigEndian = IC.getDataLayout().isBigEndian();
1886 unsigned SrcElts = SrcTy->getNumElements();
1887 unsigned DestElts = DestTy->getNumElements();
1889 assert(SrcElts != DestElts && "Element counts should be different.");
1891 // Now that the element types match, get the shuffle mask and RHS of the
1892 // shuffle to use, which depends on whether we're increasing or decreasing the
1893 // size of the input.
1894 SmallVector<uint32_t, 16> ShuffleMaskStorage;
1895 ArrayRef<uint32_t> ShuffleMask;
1896 Value *V2;
1898 // Produce an identity shuffle mask for the src vector.
1899 ShuffleMaskStorage.resize(SrcElts);
1900 std::iota(ShuffleMaskStorage.begin(), ShuffleMaskStorage.end(), 0);
1902 if (SrcElts > DestElts) {
1903 // If we're shrinking the number of elements (rewriting an integer
1904 // truncate), just shuffle in the elements corresponding to the least
1905 // significant bits from the input and use undef as the second shuffle
1906 // input.
1907 V2 = UndefValue::get(SrcTy);
1908 // Make sure the shuffle mask selects the "least significant bits" by
1909 // keeping elements from the back of the src vector for big endian, and
1910 // from the front for little endian.
1911 ShuffleMask = ShuffleMaskStorage;
1912 if (IsBigEndian)
1913 ShuffleMask = ShuffleMask.take_back(DestElts);
1914 else
1915 ShuffleMask = ShuffleMask.take_front(DestElts);
1916 } else {
1917 // If we're increasing the number of elements (rewriting an integer zext),
1918 // shuffle in all of the elements from InVal. Fill the rest of the result
1919 // elements with zeros from a constant zero vector.
1920 V2 = Constant::getNullValue(SrcTy);
1921 // Use first elt from V2 when indicating zero in the shuffle mask.
1922 uint32_t NullElt = SrcElts;
1923 // Extend with null values in the "most significant bits" by adding elements
1924 // in front of the src vector for big endian, and at the back for little
1925 // endian.
1926 unsigned DeltaElts = DestElts - SrcElts;
1927 if (IsBigEndian)
1928 ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
1929 else
1930 ShuffleMaskStorage.append(DeltaElts, NullElt);
1931 ShuffleMask = ShuffleMaskStorage;
1932 }
1934 return new ShuffleVectorInst(InVal, V2,
1935 ConstantDataVector::get(V2->getContext(),
1936 ShuffleMask));
1937 }
1939 static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
1940 return Value % Ty->getPrimitiveSizeInBits() == 0;
1941 }
1943 static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
1944 return Value / Ty->getPrimitiveSizeInBits();
1945 }
1947 /// V is a value which is inserted into a vector of VecEltTy.
1948 /// Look through the value to see if we can decompose it into
1949 /// insertions into the vector. See the example in the comment for
1950 /// OptimizeIntegerToVectorInsertions for the pattern this handles.
1951 /// The size of V's type is always a non-zero multiple of VecEltTy's size.
1952 /// Shift is the number of bits between the lsb of V and the lsb of
1953 /// the vector.
1955 /// This returns false if the pattern can't be matched or true if it can,
1956 /// filling in Elements with the elements found here.
1957 static bool collectInsertionElements(Value *V, unsigned Shift,
1958 SmallVectorImpl<Value *> &Elements,
1959 Type *VecEltTy, bool isBigEndian) {
1960 assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
1961 "Shift should be a multiple of the element type size");
1963 // Undef values never contribute useful bits to the result.
1964 if (isa<UndefValue>(V)) return true;
1966 // If we got down to a value of the right type, we win; try inserting it
1967 // into the right element.
1968 if (V->getType() == VecEltTy) {
1969 // Inserting null doesn't actually insert any elements.
1970 if (Constant *C = dyn_cast<Constant>(V))
1971 if (C->isNullValue())
1972 return true;
1974 unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
1975 if (isBigEndian)
1976 ElementIndex = Elements.size() - ElementIndex - 1;
1978 // Fail if multiple elements are inserted into this slot.
1979 if (Elements[ElementIndex])
1980 return false;
1982 Elements[ElementIndex] = V;
1983 return true;
1984 }
1986 if (Constant *C = dyn_cast<Constant>(V)) {
1987 // Figure out the # elements this provides, and bitcast it or slice it up
1988 // as required.
1989 unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
1990 VecEltTy);
1991 // If the constant is the size of a vector element, we just need to bitcast
1992 // it to the right type so it gets properly inserted.
1993 if (NumElts == 1)
1994 return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
1995 Shift, Elements, VecEltTy, isBigEndian);
1997 // Okay, this is a constant that covers multiple elements. Slice it up into
1998 // pieces and insert each element-sized piece into the vector.
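// e.g. (little endian, i32 elements): the i64 constant 0x0000000100000002
// is sliced into 2 (element 0) and 1 (element 1).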
1999 if (!isa<IntegerType>(C->getType()))
2000 C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
2001 C->getType()->getPrimitiveSizeInBits()));
2002 unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
2003 Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
2005 for (unsigned i = 0; i != NumElts; ++i) {
2006 unsigned ShiftI = Shift+i*ElementSize;
2007 Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
2008 ShiftI));
2009 Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
2010 if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
2011 isBigEndian))
2012 return false;
2013 }
2014 return true;
2015 }
2017 if (!V->hasOneUse()) return false;
2019 Instruction *I = dyn_cast<Instruction>(V);
2020 if (!I) return false;
2021 switch (I->getOpcode()) {
2022 default: return false; // Unhandled case.
2023 case Instruction::BitCast:
2024 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2025 isBigEndian);
2026 case Instruction::ZExt:
2027 if (!isMultipleOfTypeSize(
2028 I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
2029 VecEltTy))
2030 return false;
2031 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2032 isBigEndian);
2033 case Instruction::Or:
2034 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2035 isBigEndian) &&
2036 collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
2037 isBigEndian);
2038 case Instruction::Shl: {
2039 // Must be shifting by a constant that is a multiple of the element size.
2040 ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
2041 if (!CI) return false;
2042 Shift += CI->getZExtValue();
2043 if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
2044 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2045 isBigEndian);
2046 }
2047 }
2048 }
2052 /// If the input is an 'or' instruction, we may be doing shifts and ors to
2053 /// assemble the elements of the vector manually.
2054 /// Try to rip the code out and replace it with insertelements. This is to
2055 /// optimize code like this:
2057 /// %tmp37 = bitcast float %inc to i32
2058 /// %tmp38 = zext i32 %tmp37 to i64
2059 /// %tmp31 = bitcast float %inc5 to i32
2060 /// %tmp32 = zext i32 %tmp31 to i64
2061 /// %tmp33 = shl i64 %tmp32, 32
2062 /// %ins35 = or i64 %tmp33, %tmp38
2063 /// %tmp43 = bitcast i64 %ins35 to <2 x float>
2065 /// Into two insertelements that do "buildvector{%inc, %inc5}".
2066 static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
2067 InstCombiner &IC) {
2068 VectorType *DestVecTy = cast<VectorType>(CI.getType());
2069 Value *IntInput = CI.getOperand(0);
2071 SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
2072 if (!collectInsertionElements(IntInput, 0, Elements,
2073 DestVecTy->getElementType(),
2074 IC.getDataLayout().isBigEndian()))
2075 return nullptr;
2077 // If we succeeded, we know that all of the elements are specified by
2078 // Elements or are zero if Elements has a null entry. Recast this as a set
2079 // of insertions.
2080 Value *Result = Constant::getNullValue(CI.getType());
2081 for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
2082 if (!Elements[i]) continue; // Unset element.
2084 Result = IC.Builder.CreateInsertElement(Result, Elements[i],
2085 IC.Builder.getInt32(i));
2086 }
2088 return Result;
2089 }
2091 /// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
2092 /// vector followed by extract element. The backend tends to handle bitcasts of
2093 /// vectors better than bitcasts of scalars because vector registers are
2094 /// usually not type-specific like scalar integer or scalar floating-point.
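///
/// For example (a sketch):
///   %e = extractelement <2 x i64> %v, i32 0
///   %b = bitcast i64 %e to double
/// becomes
///   %bv = bitcast <2 x i64> %v to <2 x double>
///   %b = extractelement <2 x double> %bv, i32 0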
2095 static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
2097 // TODO: Create and use a pattern matcher for ExtractElementInst.
2098 auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
2099 if (!ExtElt || !ExtElt->hasOneUse())
2100 return nullptr;
2102 // The bitcast must be to a vectorizable type, otherwise we can't make a new
2103 // type to extract from.
2104 Type *DestType = BitCast.getType();
2105 if (!VectorType::isValidElementType(DestType))
2106 return nullptr;
2108 unsigned NumElts = ExtElt->getVectorOperandType()->getNumElements();
2109 auto *NewVecType = VectorType::get(DestType, NumElts);
2110 auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
2111 NewVecType, "bc");
2112 return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
2113 }
2115 /// Change the type of a bitwise logic operation if we can eliminate a bitcast.
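///
/// A sketch of the first pattern handled below, with X : <4 x i16>:
///   bitcast (and (bitcast X to <2 x i32>), Y) to <4 x i16>
///   --> and X, (bitcast Y to <4 x i16>)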
2116 static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
2117 InstCombiner::BuilderTy &Builder) {
2118 Type *DestTy = BitCast.getType();
2119 BinaryOperator *BO;
2120 if (!DestTy->isIntOrIntVectorTy() ||
2121 !match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
2122 !BO->isBitwiseLogicOp())
2123 return nullptr;
2125 // FIXME: This transform is restricted to vector types to avoid backend
2126 // problems caused by creating potentially illegal operations. If a fix-up is
2127 // added to handle that situation, we can remove this check.
2128 if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
2129 return nullptr;
2131 Value *X;
2132 if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
2133 X->getType() == DestTy && !isa<Constant>(X)) {
2134 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
2135 Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
2136 return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
2137 }
2139 if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
2140 X->getType() == DestTy && !isa<Constant>(X)) {
2141 // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
2142 Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
2143 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
2144 }
2146 // Canonicalize vector bitcasts to come before vector bitwise logic with a
2147 // constant. This eases recognition of special constants for later ops.
2148 // Example:
2149 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
2150 Constant *C;
2151 if (match(BO->getOperand(1), m_Constant(C))) {
2152 // bitcast (logic X, C) --> logic (bitcast X, C')
2153 Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
2154 Value *CastedC = ConstantExpr::getBitCast(C, DestTy);
2155 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
2156 }
2158 return nullptr;
2159 }
2161 /// Change the type of a select if we can eliminate a bitcast.
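///
/// A sketch of the first pattern handled below, with X : <2 x i64>:
///   bitcast (select i1 %c, (bitcast X to <4 x i32>), Y) to <2 x i64>
///   --> select i1 %c, X, (bitcast Y to <2 x i64>)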
2162 static Instruction *foldBitCastSelect(BitCastInst &BitCast,
2163 InstCombiner::BuilderTy &Builder) {
2164 Value *Cond, *TVal, *FVal;
2165 if (!match(BitCast.getOperand(0),
2166 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
2167 return nullptr;
2169 // A vector select must maintain the same number of elements in its operands.
2170 Type *CondTy = Cond->getType();
2171 Type *DestTy = BitCast.getType();
2172 if (CondTy->isVectorTy()) {
2173 if (!DestTy->isVectorTy())
2174 return nullptr;
2175 if (DestTy->getVectorNumElements() != CondTy->getVectorNumElements())
2176 return nullptr;
2177 }
2179 // FIXME: This transform is restricted from changing the select between
2180 // scalars and vectors to avoid backend problems caused by creating
2181 // potentially illegal operations. If a fix-up is added to handle that
2182 // situation, we can remove this check.
2183 if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
2184 return nullptr;
2186 auto *Sel = cast<Instruction>(BitCast.getOperand(0));
2187 Value *X;
2188 if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2189 !isa<Constant>(X)) {
2190 // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
2191 Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
2192 return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
2193 }
2195 if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2196 !isa<Constant>(X)) {
2197 // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
2198 Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
2199 return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
2200 }
2202 return nullptr;
2203 }
2205 /// Check if all users of CI are StoreInsts.
2206 static bool hasStoreUsersOnly(CastInst &CI) {
2207 for (User *U : CI.users()) {
2208 if (!isa<StoreInst>(U))
2209 return false;
2210 }
2211 return true;
2212 }
2214 /// This function handles the following case:
2215 ///
2216 ///     A  ->  B    cast
2217 ///     PHI
2218 ///     B  ->  A    cast
2219 ///
2220 /// All the related PHI nodes can be replaced by new PHI nodes with type A.
2221 /// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
2222 Instruction *InstCombiner::optimizeBitCastFromPhi(CastInst &CI, PHINode *PN) {
2223 // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
2224 if (hasStoreUsersOnly(CI))
2225 return nullptr;
2227 Value *Src = CI.getOperand(0);
2228 Type *SrcTy = Src->getType(); // Type B
2229 Type *DestTy = CI.getType(); // Type A
2231 SmallVector<PHINode *, 4> PhiWorklist;
2232 SmallSetVector<PHINode *, 4> OldPhiNodes;
2234 // Find all of the A->B casts and PHI nodes.
2235 // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
2236 // OldPhiNodes is used to track all known PHI nodes; before adding a new
2237 // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
2238 PhiWorklist.push_back(PN);
2239 OldPhiNodes.insert(PN);
2240 while (!PhiWorklist.empty()) {
2241 auto *OldPN = PhiWorklist.pop_back_val();
2242 for (Value *IncValue : OldPN->incoming_values()) {
2243 if (isa<Constant>(IncValue))
2244 continue;
2246 if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
2247 // If there is a sequence of one or more load instructions, where each
2248 // loaded value is used as the address of a later load, a bitcast is
2249 // necessary to change the value type, so don't optimize it. For
2250 // simplicity we give up if the load address comes from another load.
2251 Value *Addr = LI->getOperand(0);
2252 if (Addr == &CI || isa<LoadInst>(Addr))
2253 return nullptr;
2254 if (LI->hasOneUse() && LI->isSimple())
2255 continue;
2256 // If a LoadInst has more than one use, changing the type of the loaded
2257 // value may create another bitcast.
2258 return nullptr;
2259 }
2261 if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
2262 if (OldPhiNodes.insert(PNode))
2263 PhiWorklist.push_back(PNode);
2264 continue;
2265 }
2267 auto *BCI = dyn_cast<BitCastInst>(IncValue);
2268 // We can't handle other instructions.
2269 if (!BCI)
2270 return nullptr;
2272 // Verify it's an A->B cast.
2273 Type *TyA = BCI->getOperand(0)->getType();
2274 Type *TyB = BCI->getType();
2275 if (TyA != DestTy || TyB != SrcTy)
2276 return nullptr;
2277 }
2278 }
2280 // Check that each user of each old PHI node is something that we can
2281 // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
2282 for (auto *OldPN : OldPhiNodes) {
2283 for (User *V : OldPN->users()) {
2284 if (auto *SI = dyn_cast<StoreInst>(V)) {
2285 if (!SI->isSimple() || SI->getOperand(0) != OldPN)
2286 return nullptr;
2287 } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2288 // Verify it's a B->A cast.
2289 Type *TyB = BCI->getOperand(0)->getType();
2290 Type *TyA = BCI->getType();
2291 if (TyA != DestTy || TyB != SrcTy)
2292 return nullptr;
2293 } else if (auto *PHI = dyn_cast<PHINode>(V)) {
2294 // As long as the user is another old PHI node, then even if we don't
2295 // rewrite it, the PHI web we're considering won't have any users
2296 // outside itself, so it'll be dead.
2297 if (OldPhiNodes.count(PHI) == 0)
2298 return nullptr;
2299 } else {
2300 return nullptr;
2301 }
2302 }
2303 }
2305 // For each old PHI node, create a corresponding new PHI node with type A.
2306 SmallDenseMap<PHINode *, PHINode *> NewPNodes;
2307 for (auto *OldPN : OldPhiNodes) {
2308 Builder.SetInsertPoint(OldPN);
2309 PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
2310 NewPNodes[OldPN] = NewPN;
2311 }
2313 // Fill in the operands of new PHI nodes.
2314 for (auto *OldPN : OldPhiNodes) {
2315 PHINode *NewPN = NewPNodes[OldPN];
2316 for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
2317 Value *V = OldPN->getOperand(j);
2318 Value *NewV = nullptr;
2319 if (auto *C = dyn_cast<Constant>(V)) {
2320 NewV = ConstantExpr::getBitCast(C, DestTy);
2321 } else if (auto *LI = dyn_cast<LoadInst>(V)) {
2322 // Explicitly perform load combine to make sure no opposing transform
2323 // can remove the bitcast in the meantime and trigger an infinite loop.
2324 Builder.SetInsertPoint(LI);
2325 NewV = combineLoadToNewType(*LI, DestTy);
2326 // Remove the old load and its use in the old phi, which itself becomes
2327 // dead once the whole transform finishes.
2328 replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
2329 eraseInstFromFunction(*LI);
2330 } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2331 NewV = BCI->getOperand(0);
2332 } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
2333 NewV = NewPNodes[PrevPN];
2334 }
2335 assert(NewV);
2336 NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
2337 }
2338 }
2340 // Traverse all accumulated PHI nodes and process their users,
2341 // which are Stores and BitCasts. Without this processing,
2342 // NewPHI nodes could be replicated and could lead to extra
2343 // moves generated after DeSSA.
2344 // If there is a store with type B, change it to type A.
2347 // Replace users of BitCast B->A with NewPHI. These will help
2348 // later to get rid of a closure formed by OldPHI nodes.
2349 Instruction *RetVal = nullptr;
2350 for (auto *OldPN : OldPhiNodes) {
2351 PHINode *NewPN = NewPNodes[OldPN];
2352 for (auto It = OldPN->user_begin(), End = OldPN->user_end(); It != End; ) {
2353 User *V = *It;
2354 // We may remove this user, advance to avoid iterator invalidation.
2355 ++It;
2356 if (auto *SI = dyn_cast<StoreInst>(V)) {
2357 assert(SI->isSimple() && SI->getOperand(0) == OldPN);
2358 Builder.SetInsertPoint(SI);
2359 auto *NewBC =
2360 cast<BitCastInst>(Builder.CreateBitCast(NewPN, SrcTy));
2361 SI->setOperand(0, NewBC);
2362 Worklist.Add(SI);
2363 assert(hasStoreUsersOnly(*NewBC));
2364 }
2365 else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2366 Type *TyB = BCI->getOperand(0)->getType();
2367 Type *TyA = BCI->getType();
2368 assert(TyA == DestTy && TyB == SrcTy);
2369 (void) TyA;
2370 (void) TyB;
2371 Instruction *I = replaceInstUsesWith(*BCI, NewPN);
2372 if (BCI == &CI)
2373 RetVal = I;
2374 } else if (auto *PHI = dyn_cast<PHINode>(V)) {
2375 assert(OldPhiNodes.count(PHI) > 0);
2376 (void) PHI;
2377 } else {
2378 llvm_unreachable("all uses should be handled");
2379 }
2380 }
2381 }
2383 return RetVal;
2384 }
2386 Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
2387 // If the operands are integer typed then apply the integer transforms,
2388 // otherwise just apply the common ones.
2389 Value *Src = CI.getOperand(0);
2390 Type *SrcTy = Src->getType();
2391 Type *DestTy = CI.getType();
2393 // Get rid of casts from one type to the same type. These are useless and can
2394 // be replaced by the operand.
2395 if (DestTy == Src->getType())
2396 return replaceInstUsesWith(CI, Src);
2398 if (PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
2399 PointerType *SrcPTy = cast<PointerType>(SrcTy);
2400 Type *DstElTy = DstPTy->getElementType();
2401 Type *SrcElTy = SrcPTy->getElementType();
2403 // Casting pointers between the same type, but with different address spaces
2404 // is an addrspace cast rather than a bitcast.
2405 if ((DstElTy == SrcElTy) &&
2406 (DstPTy->getAddressSpace() != SrcPTy->getAddressSpace()))
2407 return new AddrSpaceCastInst(Src, DestTy);
2409 // If we are casting an alloca to a pointer to a type of the same
2410 // size, rewrite the allocation instruction to allocate the "right" type.
2411 // There is no need to modify malloc calls because it is their bitcast that
2412 // needs to be cleaned up.
2413 if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
2414 if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
2417 // When the type pointed to is not sized the cast cannot be
2418 // turned into a gep.
2419 Type *PointeeType =
2420 cast<PointerType>(Src->getType()->getScalarType())->getElementType();
2421 if (!PointeeType->isSized())
2422 return nullptr;
2424 // If the source and destination are pointers, and this cast is equivalent
2425 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
2426 // This can enhance SROA and other transforms that want type-safe pointers.
2427 unsigned NumZeros = 0;
2428 while (SrcElTy != DstElTy &&
2429 isa<CompositeType>(SrcElTy) && !SrcElTy->isPointerTy() &&
2430 SrcElTy->getNumContainedTypes() /* not "{}" */) {
2431 SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(0U);
2432 ++NumZeros;
2433 }
2435 // If we found a path from the src to dest, create the getelementptr now.
2436 if (SrcElTy == DstElTy) {
2437 SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
2438 GetElementPtrInst *GEP =
2439 GetElementPtrInst::Create(SrcPTy->getElementType(), Src, Idxs);
2441 // If the source pointer is dereferenceable, then assume it points to an
2442 // allocated object and apply "inbounds" to the GEP.
2443 bool CanBeNull;
2444 if (Src->getPointerDereferenceableBytes(DL, CanBeNull)) {
2445 // In a non-default address space (not 0), a null pointer can not be
2446 // assumed inbounds, so ignore that case (dereferenceable_or_null).
2447 // The reason is that 'null' is not treated differently in these address
2448 // spaces, and we consequently ignore the 'gep inbounds' special case
2449 // for 'null' which allows 'inbounds' on 'null' if the indices are
2450 // zeros.
2451 if (SrcPTy->getAddressSpace() == 0 || !CanBeNull)
2452 GEP->setIsInBounds();
2453 }
2454 return GEP;
2455 }
2456 }
2458 if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
2459 if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
2460 Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
2461 return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
2462 Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
2463 // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
2464 }
2466 if (isa<IntegerType>(SrcTy)) {
2467 // If this is a cast from an integer to vector, check to see if the input
2468 // is a trunc or zext of a bitcast from vector. If so, we can replace all
2469 // the casts with a shuffle and (potentially) a bitcast.
2470 if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
2471 CastInst *SrcCast = cast<CastInst>(Src);
2472 if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
2473 if (isa<VectorType>(BCIn->getOperand(0)->getType()))
2474 if (Instruction *I = optimizeVectorResizeWithIntegerBitCasts(
2475 BCIn->getOperand(0), cast<VectorType>(DestTy), *this))
2476 return I;
2479 // If the input is an 'or' instruction, we may be doing shifts and ors to
2480 // assemble the elements of the vector manually. Try to rip the code out
2481 // and replace it with insertelements.
2482 if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
2483 return replaceInstUsesWith(CI, V);
2484 }
2485 }
2487 if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
2488 if (SrcVTy->getNumElements() == 1) {
2489 // If our destination is not a vector, then make this a straight
2490 // scalar-scalar cast.
2491 if (!DestTy->isVectorTy()) {
2492 Value *Elem =
2493 Builder.CreateExtractElement(Src,
2494 Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
2495 return CastInst::Create(Instruction::BitCast, Elem, DestTy);
2496 }
2498 // Otherwise, see if our source is an insert. If so, then use the scalar
2499 // component directly:
2500 // bitcast (inselt <1 x elt> V, X, 0) to <n x m> --> bitcast X to <n x m>
2501 if (auto *InsElt = dyn_cast<InsertElementInst>(Src))
2502 return new BitCastInst(InsElt->getOperand(1), DestTy);
2503 }
2504 }
2506 if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
2507 // Okay, we have (bitcast (shuffle ..)). Check to see if this is
2508 // a bitcast to a vector with the same # elts.
2509 Value *ShufOp0 = Shuf->getOperand(0);
2510 Value *ShufOp1 = Shuf->getOperand(1);
2511 unsigned NumShufElts = Shuf->getType()->getVectorNumElements();
2512 unsigned NumSrcVecElts = ShufOp0->getType()->getVectorNumElements();
2513 if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
2514 DestTy->getVectorNumElements() == NumShufElts &&
2515 NumShufElts == NumSrcVecElts) {
2516 BitCastInst *Tmp;
2517 // If either of the operands is a cast from CI.getType(), then
2518 // evaluating the shuffle in the casted destination's type will allow
2519 // us to eliminate at least one cast.
2520 if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
2521 Tmp->getOperand(0)->getType() == DestTy) ||
2522 ((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
2523 Tmp->getOperand(0)->getType() == DestTy)) {
2524 Value *LHS = Builder.CreateBitCast(ShufOp0, DestTy);
2525 Value *RHS = Builder.CreateBitCast(ShufOp1, DestTy);
2526 // Return a new shuffle vector. Use the same element ID's, as we
2527 // know the vector types match #elts.
2528 return new ShuffleVectorInst(LHS, RHS, Shuf->getOperand(2));
2529 }
2530 }
2532 // A bitcasted-to-scalar and byte-reversing shuffle is better recognized as
2533 // a bswap:
2534 // bitcast <N x i8> (shuf X, undef, <N, N-1,...0>) --> bswap (bitcast X)
2535 // TODO: We should match the related pattern for bitreverse.
2536 if (DestTy->isIntegerTy() &&
2537 DL.isLegalInteger(DestTy->getScalarSizeInBits()) &&
2538 SrcTy->getScalarSizeInBits() == 8 && NumShufElts % 2 == 0 &&
2539 Shuf->hasOneUse() && Shuf->isReverse()) {
2540 assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
2541 assert(isa<UndefValue>(ShufOp1) && "Unexpected shuffle op");
2542 Function *Bswap =
2543 Intrinsic::getDeclaration(CI.getModule(), Intrinsic::bswap, DestTy);
2544 Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
2545 return IntrinsicInst::Create(Bswap, { ScalarX });
2546 }
2547 }
2549 // Handle the A->B->A cast when there is an intervening PHI node.
2550 if (PHINode *PN = dyn_cast<PHINode>(Src))
2551 if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
2552 return I;
2554 if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
2555 return I;
2557 if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
2558 return I;
2560 if (Instruction *I = foldBitCastSelect(CI, Builder))
2561 return I;
2563 if (SrcTy->isPointerTy())
2564 return commonPointerCastTransforms(CI);
2565 return commonCastTransforms(CI);
2566 }
2568 Instruction *InstCombiner::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
2569 // If the destination pointer element type is not the same as the source's,
2570 // first do a bitcast to the destination type, and then the addrspacecast.
2571 // This allows the cast to be exposed to other transforms.
2572 Value *Src = CI.getOperand(0);
2573 PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
2574 PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());
2576 Type *DestElemTy = DestTy->getElementType();
2577 if (SrcTy->getElementType() != DestElemTy) {
2578 Type *MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
2579 if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
2580 // Handle vectors of pointers.
2581 MidTy = VectorType::get(MidTy, VT->getNumElements());
2582 }
2584 Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
2585 return new AddrSpaceCastInst(NewBitCast, CI.getType());
2588 return commonPointerCastTransforms(CI);
2589 }