1 //===- InstCombineCasts.cpp -----------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the visit functions for cast operations.
12 //===----------------------------------------------------------------------===//
14 #include "InstCombineInternal.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/Analysis/ConstantFolding.h"
17 #include "llvm/Analysis/TargetLibraryInfo.h"
18 #include "llvm/IR/DataLayout.h"
19 #include "llvm/IR/PatternMatch.h"
20 #include "llvm/Support/KnownBits.h"
22 using namespace PatternMatch;
24 #define DEBUG_TYPE "instcombine"
26 /// Analyze 'Val', seeing if it is a simple linear expression.
27 /// If so, decompose it, returning some value X, such that Val is X*Scale+Offset.
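/// For illustration (hypothetical IR, not from any test): an expression such as
///   %v = add nuw i32 (mul nuw i32 %x, 4), 8
/// decomposes as X = %x, Scale = 4, Offset = 8. The no-wrap flags matter because
/// this analysis refuses to look through operations that might overflow.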
30 static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
32 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
33 Offset = CI->getZExtValue();
35 return ConstantInt::get(Val->getType(), 0);
38 if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
39 // Cannot look past anything that might overflow.
40 OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
41 if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) {
47 if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
48 if (I->getOpcode() == Instruction::Shl) {
49 // This is a value scaled by '1 << the shift amt'.
50 Scale = UINT64_C(1) << RHS->getZExtValue();
52 return I->getOperand(0);
55 if (I->getOpcode() == Instruction::Mul) {
56 // This value is scaled by 'RHS'.
57 Scale = RHS->getZExtValue();
59 return I->getOperand(0);
62 if (I->getOpcode() == Instruction::Add) {
63 // We have X+C. Check to see if we really have (X*C2)+C1,
64 // where C1 is divisible by C2.
67 decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
68 Offset += RHS->getZExtValue();
75 // Otherwise, we can't look past this.
81 /// If we find a cast of an allocation instruction, try to eliminate the cast by
82 /// moving the type information into the alloc.
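/// A minimal illustration (hypothetical IR): given
///   %buf = alloca i32, i32 2
///   %p   = bitcast i32* %buf to i64*
/// the two i32 elements cover exactly one i64 and the i64 alignment is at least
/// the i32 alignment, so the allocation can be rewritten as 'alloca i64' and the
/// bitcast folded away.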
83 Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
85 PointerType *PTy = cast<PointerType>(CI.getType());
87 BuilderTy AllocaBuilder(Builder);
88 AllocaBuilder.SetInsertPoint(&AI);
90 // Get the type really allocated and the type being cast to.
91 Type *AllocElTy = AI.getAllocatedType();
92 Type *CastElTy = PTy->getElementType();
93 if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;
95 unsigned AllocElTyAlign = DL.getABITypeAlignment(AllocElTy);
96 unsigned CastElTyAlign = DL.getABITypeAlignment(CastElTy);
97 if (CastElTyAlign < AllocElTyAlign) return nullptr;
99 // If the allocation has multiple uses, only promote it if we are strictly
100 // increasing the alignment of the resultant allocation. If we keep it the
101 // same, we open the door to infinite loops of various kinds.
102 if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;
104 uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy);
105 uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy);
106 if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;
108 // If the allocation has multiple uses, only promote it if we're not
109 // shrinking the amount of memory being allocated.
110 uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy);
111 uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy);
112 if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;
114 // See if we can satisfy the modulus by pulling a scale out of the array size argument.
116 unsigned ArraySizeScale;
117 uint64_t ArrayOffset;
118 Value *NumElements = // See if the array size is a decomposable linear expr.
119 decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);
121 // If we can now satisfy the modulus by using a non-1 scale, we really can cast it away as well.
123 if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
124 (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return nullptr;
126 unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
127 Value *Amt = nullptr;
131 Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
132 // Insert before the alloca, not before the cast.
133 Amt = AllocaBuilder.CreateMul(Amt, NumElements);
136 if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
137 Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
139 Amt = AllocaBuilder.CreateAdd(Amt, Off);
142 AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
143 New->setAlignment(AI.getAlignment());
145 New->setUsedWithInAlloca(AI.isUsedWithInAlloca());
147 // If the allocation has multiple real uses, insert a cast and change all
148 // things that used it to use the new cast. This will also hack on CI, but it will die soon.
150 if (!AI.hasOneUse()) {
151 // New is the allocation instruction, pointer typed. AI is the original
152 // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
153 Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
154 replaceInstUsesWith(AI, NewCast);
156 return replaceInstUsesWith(CI, New);
159 /// Given an expression that canEvaluateTruncated, canEvaluateZExtd, or canEvaluateSExtd returns
160 /// true for, actually insert the code to evaluate the expression.
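/// For example (illustrative only), evaluating
///   or i32 (sext i16 %a to i32), 7
/// in type i16 rebuilds the tree as 'or i16 %a, 7': the sext collapses to its
/// source and the constant is recreated in the narrow type.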
161 Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
163 if (Constant *C = dyn_cast<Constant>(V)) {
164 C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
165 // If we got a constantexpr back, try to simplify it with DL info.
166 if (Constant *FoldedC = ConstantFoldConstant(C, DL, &TLI))
171 // Otherwise, it must be an instruction.
172 Instruction *I = cast<Instruction>(V);
173 Instruction *Res = nullptr;
174 unsigned Opc = I->getOpcode();
176 case Instruction::Add:
177 case Instruction::Sub:
178 case Instruction::Mul:
179 case Instruction::And:
180 case Instruction::Or:
181 case Instruction::Xor:
182 case Instruction::AShr:
183 case Instruction::LShr:
184 case Instruction::Shl:
185 case Instruction::UDiv:
186 case Instruction::URem: {
187 Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
188 Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
189 Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
192 case Instruction::Trunc:
193 case Instruction::ZExt:
194 case Instruction::SExt:
195 // If the source type of the cast is the type we're trying for then we can
196 // just return the source. There's no need to insert it because it is not new.
198 if (I->getOperand(0)->getType() == Ty)
199 return I->getOperand(0);
201 // Otherwise, must be the same type of cast, so just reinsert a new one.
202 // This also handles the case of zext(trunc(x)) -> zext(x).
203 Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
204 Opc == Instruction::SExt);
206 case Instruction::Select: {
207 Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
208 Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
209 Res = SelectInst::Create(I->getOperand(0), True, False);
212 case Instruction::PHI: {
213 PHINode *OPN = cast<PHINode>(I);
214 PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
215 for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
217 EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
218 NPN->addIncoming(V, OPN->getIncomingBlock(i));
224 // TODO: Can handle more cases here.
225 llvm_unreachable("Unreachable!");
229 return InsertNewInstWith(Res, *I);
232 Instruction::CastOps InstCombiner::isEliminableCastPair(const CastInst *CI1,
233 const CastInst *CI2) {
234 Type *SrcTy = CI1->getSrcTy();
235 Type *MidTy = CI1->getDestTy();
236 Type *DstTy = CI2->getDestTy();
238 Instruction::CastOps firstOp = CI1->getOpcode();
239 Instruction::CastOps secondOp = CI2->getOpcode();
241 SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
243 MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
245 DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
246 unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
247 DstTy, SrcIntPtrTy, MidIntPtrTy,
250 // We don't want to form an inttoptr or ptrtoint that converts to an integer
251 // type that differs from the pointer size.
252 if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
253 (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
256 return Instruction::CastOps(Res);
259 /// @brief Implement the transforms common to all CastInst visitors.
260 Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
261 Value *Src = CI.getOperand(0);
263 // Try to eliminate a cast of a cast.
264 if (auto *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
265 if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
266 // The first cast (CSrc) is eliminable so we need to fix up or replace
267 // the second cast (CI). CSrc will then have a good chance of being dead.
268 return CastInst::Create(NewOpc, CSrc->getOperand(0), CI.getType());
272 // If we are casting a select, then fold the cast into the select.
273 if (auto *SI = dyn_cast<SelectInst>(Src))
274 if (Instruction *NV = FoldOpIntoSelect(CI, SI))
277 // If we are casting a PHI, then fold the cast into the PHI.
278 if (auto *PN = dyn_cast<PHINode>(Src)) {
279 // Don't do this if it would create a PHI node with an illegal type from a legal type.
281 if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
282 shouldChangeType(CI.getType(), Src->getType()))
283 if (Instruction *NV = foldOpIntoPhi(CI, PN))
290 /// Return true if we can evaluate the specified expression tree as type Ty
291 /// instead of its larger type, and arrive with the same value.
292 /// This is used by code that tries to eliminate truncates.
294 /// Ty will always be a type smaller than V. We should return true if trunc(V)
295 /// can be computed by computing V in the smaller type. If V is an instruction,
296 /// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
297 /// makes sense if x and y can be efficiently truncated.
299 /// This function works on both vectors and scalars.
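/// For example (hypothetical IR), 'trunc (and i32 (zext i16 %x to i32), 42) to i16'
/// can be evaluated as 'and i16 %x, 42': the zext comes from the destination type
/// and the constant is freely truncatable.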
301 static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombiner &IC,
303 // We can always evaluate constants in another type.
304 if (isa<Constant>(V))
307 Instruction *I = dyn_cast<Instruction>(V);
308 if (!I) return false;
310 Type *OrigTy = V->getType();
312 // If this is an extension from the dest type, we can eliminate it, even if it
313 // has multiple uses.
314 if ((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
315 I->getOperand(0)->getType() == Ty)
318 // We can't extend or shrink something that has multiple uses: doing so would
319 // require duplicating the instruction in general, which isn't profitable.
320 if (!I->hasOneUse()) return false;
322 unsigned Opc = I->getOpcode();
324 case Instruction::Add:
325 case Instruction::Sub:
326 case Instruction::Mul:
327 case Instruction::And:
328 case Instruction::Or:
329 case Instruction::Xor:
330 // These operators can all arbitrarily be extended or truncated.
331 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
332 canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
334 case Instruction::UDiv:
335 case Instruction::URem: {
336 // UDiv and URem can be truncated if all the truncated bits are zero.
337 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
338 uint32_t BitWidth = Ty->getScalarSizeInBits();
339 if (BitWidth < OrigBitWidth) {
340 APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth);
341 if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
342 IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
343 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
344 canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
349 case Instruction::Shl: {
350 // If we are truncating the result of this SHL, and if it's a shift of a
351 // constant amount, we can always perform a SHL in a smaller type.
353 if (match(I->getOperand(1), m_APInt(Amt))) {
354 uint32_t BitWidth = Ty->getScalarSizeInBits();
355 if (Amt->getLimitedValue(BitWidth) < BitWidth)
356 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
360 case Instruction::LShr: {
361 // If this is a truncate of a logical shr, we can truncate it to a smaller
362 // lshr iff we know that the bits we would otherwise be shifting in are already zeros.
365 if (match(I->getOperand(1), m_APInt(Amt))) {
366 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
367 uint32_t BitWidth = Ty->getScalarSizeInBits();
368 if (IC.MaskedValueIsZero(I->getOperand(0),
369 APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth), 0, CxtI) &&
370 Amt->getLimitedValue(BitWidth) < BitWidth) {
371 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
376 case Instruction::AShr: {
377 // If this is a truncate of an arithmetic shr, we can truncate it to a
378 // smaller ashr iff we know that all the bits from the sign bit of the
379 // original type and the sign bit of the truncate type are similar.
380 // TODO: It is enough to check that the bits we would be shifting in are
381 // similar to sign bit of the truncate type.
383 if (match(I->getOperand(1), m_APInt(Amt))) {
384 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
385 uint32_t BitWidth = Ty->getScalarSizeInBits();
386 if (Amt->getLimitedValue(BitWidth) < BitWidth &&
387 OrigBitWidth - BitWidth <
388 IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
389 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
393 case Instruction::Trunc:
394 // trunc(trunc(x)) -> trunc(x)
396 case Instruction::ZExt:
397 case Instruction::SExt:
398 // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
399 // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
401 case Instruction::Select: {
402 SelectInst *SI = cast<SelectInst>(I);
403 return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
404 canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
406 case Instruction::PHI: {
407 // We can change a phi if we can change all operands. Note that we never
408 // get into trouble with cyclic PHIs here because we only consider
409 // instructions with a single use.
410 PHINode *PN = cast<PHINode>(I);
411 for (Value *IncValue : PN->incoming_values())
412 if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
417 // TODO: Can handle more cases here.
424 /// Given a vector that is bitcast to an integer, optionally logically
425 /// right-shifted, and truncated, convert it to an extractelement.
426 /// Example (big endian):
427 /// trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
429 ///   ---> extractelement <4 x i32> %X, 1
430 static Instruction *foldVecTruncToExtElt(TruncInst &Trunc, InstCombiner &IC) {
431 Value *TruncOp = Trunc.getOperand(0);
432 Type *DestType = Trunc.getType();
433 if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
436 Value *VecInput = nullptr;
437 ConstantInt *ShiftVal = nullptr;
438 if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
439 m_LShr(m_BitCast(m_Value(VecInput)),
440 m_ConstantInt(ShiftVal)))) ||
441 !isa<VectorType>(VecInput->getType()))
444 VectorType *VecType = cast<VectorType>(VecInput->getType());
445 unsigned VecWidth = VecType->getPrimitiveSizeInBits();
446 unsigned DestWidth = DestType->getPrimitiveSizeInBits();
447 unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;
449 if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
452 // If the element type of the vector doesn't match the result type,
453 // bitcast it to a vector type that we can extract from.
454 unsigned NumVecElts = VecWidth / DestWidth;
455 if (VecType->getElementType() != DestType) {
456 VecType = VectorType::get(DestType, NumVecElts);
457 VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
460 unsigned Elt = ShiftAmount / DestWidth;
461 if (IC.getDataLayout().isBigEndian())
462 Elt = NumVecElts - 1 - Elt;
464 return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
467 /// Rotate left/right may occur in a wider type than necessary because of type
468 /// promotion rules. Try to narrow all of the component instructions.
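/// For illustration (hypothetical IR), a rotate of an i8 value done in i32:
///   %z = zext i8 %x to i32
///   %t = trunc (or (lshr %z, %amt), (shl %z, (sub 8, %amt))) to i8
/// can be rewritten entirely in i8 on truncated operands, with the shift amounts
/// masked to avoid shifting by the full bit width.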
469 Instruction *InstCombiner::narrowRotate(TruncInst &Trunc) {
470 assert((isa<VectorType>(Trunc.getSrcTy()) ||
471 shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
472 "Don't narrow to an illegal scalar type");
474 // First, find an or'd pair of opposite shifts with the same shifted operand:
475 // trunc (or (lshr ShVal, ShAmt0), (shl ShVal, ShAmt1))
477 if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_Value(Or0), m_Value(Or1)))))
480 Value *ShVal, *ShAmt0, *ShAmt1;
481 if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal), m_Value(ShAmt0)))) ||
482 !match(Or1, m_OneUse(m_LogicalShift(m_Specific(ShVal), m_Value(ShAmt1)))))
485 auto ShiftOpcode0 = cast<BinaryOperator>(Or0)->getOpcode();
486 auto ShiftOpcode1 = cast<BinaryOperator>(Or1)->getOpcode();
487 if (ShiftOpcode0 == ShiftOpcode1)
490 // The shift amounts must add up to the narrow bit width.
493 Type *DestTy = Trunc.getType();
494 unsigned NarrowWidth = DestTy->getScalarSizeInBits();
496 m_OneUse(m_Sub(m_SpecificInt(NarrowWidth), m_Specific(ShAmt1))))) {
499 } else if (match(ShAmt1, m_OneUse(m_Sub(m_SpecificInt(NarrowWidth),
500 m_Specific(ShAmt0))))) {
507 // The shifted value must have high zeros in the wide type. Typically, this
508 // will be a zext, but it could also be the result of an 'and' or 'shift'.
509 unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
510 APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
511 if (!MaskedValueIsZero(ShVal, HiBitMask, 0, &Trunc))
514 // We have an unnecessarily wide rotate!
515 // trunc (or (lshr ShVal, ShAmt), (shl ShVal, BitWidth - ShAmt))
516 // Narrow it down to eliminate the zext/trunc:
517 // or (lshr trunc(ShVal), ShAmt0'), (shl trunc(ShVal), ShAmt1')
518 Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
519 Value *NegShAmt = Builder.CreateNeg(NarrowShAmt);
521 // Mask both shift amounts to ensure there's no UB from oversized shifts.
522 Constant *MaskC = ConstantInt::get(DestTy, NarrowWidth - 1);
523 Value *MaskedShAmt = Builder.CreateAnd(NarrowShAmt, MaskC);
524 Value *MaskedNegShAmt = Builder.CreateAnd(NegShAmt, MaskC);
526 // Truncate the original value and use narrow ops.
527 Value *X = Builder.CreateTrunc(ShVal, DestTy);
528 Value *NarrowShAmt0 = SubIsOnLHS ? MaskedNegShAmt : MaskedShAmt;
529 Value *NarrowShAmt1 = SubIsOnLHS ? MaskedShAmt : MaskedNegShAmt;
530 Value *NarrowSh0 = Builder.CreateBinOp(ShiftOpcode0, X, NarrowShAmt0);
531 Value *NarrowSh1 = Builder.CreateBinOp(ShiftOpcode1, X, NarrowShAmt1);
532 return BinaryOperator::CreateOr(NarrowSh0, NarrowSh1);
535 /// Try to narrow the width of math or bitwise logic instructions by pulling a
536 /// truncate ahead of binary operators.
537 /// TODO: Transforms for truncated shifts should be moved into here.
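/// For example (illustrative IR): 'trunc (add i32 (zext i8 %x to i32), %y) to i8'
/// becomes 'add i8 %x, (trunc i32 %y to i8)', removing the zext and shrinking the
/// add, assuming the wide add has no other uses.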
538 Instruction *InstCombiner::narrowBinOp(TruncInst &Trunc) {
539 Type *SrcTy = Trunc.getSrcTy();
540 Type *DestTy = Trunc.getType();
541 if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
544 BinaryOperator *BinOp;
545 if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
548 Value *BinOp0 = BinOp->getOperand(0);
549 Value *BinOp1 = BinOp->getOperand(1);
550 switch (BinOp->getOpcode()) {
551 case Instruction::And:
552 case Instruction::Or:
553 case Instruction::Xor:
554 case Instruction::Add:
555 case Instruction::Sub:
556 case Instruction::Mul: {
558 if (match(BinOp0, m_Constant(C))) {
559 // trunc (binop C, X) --> binop (trunc C', X)
560 Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
561 Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy);
562 return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
564 if (match(BinOp1, m_Constant(C))) {
565 // trunc (binop X, C) --> binop (trunc X, C')
566 Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
567 Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy);
568 return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
571 if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
572 // trunc (binop (ext X), Y) --> binop X, (trunc Y)
573 Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
574 return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
576 if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
577 // trunc (binop Y, (ext X)) --> binop (trunc Y), X
578 Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
579 return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
587 if (Instruction *NarrowOr = narrowRotate(Trunc))
593 /// Try to narrow the width of a splat shuffle. This could be generalized to any
594 /// shuffle with a constant operand, but we limit the transform to avoid
595 /// creating a shuffle type that targets may not be able to lower effectively.
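/// For example (hypothetical IR):
///   %s = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> zeroinitializer
///   %t = trunc <4 x i32> %s to <4 x i16>
/// becomes a trunc of %x to <4 x i16> followed by the same splat shuffle.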
596 static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
597 InstCombiner::BuilderTy &Builder) {
598 auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
599 if (Shuf && Shuf->hasOneUse() && isa<UndefValue>(Shuf->getOperand(1)) &&
600 Shuf->getMask()->getSplatValue() &&
601 Shuf->getType() == Shuf->getOperand(0)->getType()) {
602 // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Undef, SplatMask
603 Constant *NarrowUndef = UndefValue::get(Trunc.getType());
604 Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
605 return new ShuffleVectorInst(NarrowOp, NarrowUndef, Shuf->getMask());
611 /// Try to narrow the width of an insert element. This could be generalized for
612 /// any vector constant, but we limit the transform to insertion into undef to
613 /// avoid potential backend problems from unsupported insertion widths. This
614 /// could also be extended to handle the case of inserting a scalar constant
615 /// into a vector variable.
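/// For example (hypothetical IR):
///   %i = insertelement <4 x i32> undef, i32 %x, i32 0
///   %t = trunc <4 x i32> %i to <4 x i16>
/// becomes 'insertelement <4 x i16> undef, i16 (trunc i32 %x to i16), i32 0'.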
616 static Instruction *shrinkInsertElt(CastInst &Trunc,
617 InstCombiner::BuilderTy &Builder) {
618 Instruction::CastOps Opcode = Trunc.getOpcode();
619 assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
620 "Unexpected instruction for shrinking");
622 auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
623 if (!InsElt || !InsElt->hasOneUse())
626 Type *DestTy = Trunc.getType();
627 Type *DestScalarTy = DestTy->getScalarType();
628 Value *VecOp = InsElt->getOperand(0);
629 Value *ScalarOp = InsElt->getOperand(1);
630 Value *Index = InsElt->getOperand(2);
632 if (isa<UndefValue>(VecOp)) {
633 // trunc (inselt undef, X, Index) --> inselt undef, (trunc X), Index
634 // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
635 UndefValue *NarrowUndef = UndefValue::get(DestTy);
636 Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
637 return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
643 Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
644 if (Instruction *Result = commonCastTransforms(CI))
647 // Test if the trunc is the user of a select which is part of a
648 // minimum or maximum operation. If so, don't do any more simplification.
649 // Even simplifying demanded bits can break the canonical form of a min/max.
652 if (SelectInst *SI = dyn_cast<SelectInst>(CI.getOperand(0)))
653 if (matchSelectPattern(SI, LHS, RHS).Flavor != SPF_UNKNOWN)
656 // See if we can simplify any instructions used by the input whose sole
657 // purpose is to compute bits we don't care about.
658 if (SimplifyDemandedInstructionBits(CI))
661 Value *Src = CI.getOperand(0);
662 Type *DestTy = CI.getType(), *SrcTy = Src->getType();
664 // Attempt to truncate the entire input expression tree to the destination
665 // type. Only do this if the dest type is a simple type, don't convert the
666 // expression tree to something weird like i93 unless the source is also strange.
668 if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
669 canEvaluateTruncated(Src, DestTy, *this, &CI)) {
671 // If this cast is a truncate, evaluating in a different type always
672 // eliminates the cast, so it is always a win.
673 DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
674 " to avoid cast: " << CI << '\n');
675 Value *Res = EvaluateInDifferentType(Src, DestTy, false);
676 assert(Res->getType() == DestTy);
677 return replaceInstUsesWith(CI, Res);
680 // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0), likewise for vector.
681 if (DestTy->getScalarSizeInBits() == 1) {
682 Constant *One = ConstantInt::get(SrcTy, 1);
683 Src = Builder.CreateAnd(Src, One);
684 Value *Zero = Constant::getNullValue(Src->getType());
685 return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
688 // FIXME: Maybe combine the next two transforms to handle the no cast case
689 // more efficiently. Support vector types. Cleanup code by using m_OneUse.
691 // Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
692 Value *A = nullptr; ConstantInt *Cst = nullptr;
693 if (Src->hasOneUse() &&
694 match(Src, m_LShr(m_ZExt(m_Value(A)), m_ConstantInt(Cst)))) {
695 // We have three types to worry about here, the type of A, the source of
696 // the truncate (MidSize), and the destination of the truncate. We know that
697 // ASize < MidSize and MidSize > ResultSize, but don't know the relation
698 // between ASize and ResultSize.
699 unsigned ASize = A->getType()->getPrimitiveSizeInBits();
701 // If the shift amount is larger than the size of A, then the result is
702 // known to be zero because all the input bits got shifted out.
703 if (Cst->getZExtValue() >= ASize)
704 return replaceInstUsesWith(CI, Constant::getNullValue(DestTy));
706 // Since we're doing an lshr and a zero extend, and know that the shift
707 // amount is smaller than ASize, it is always safe to do the shift in A's
708 // type, then zero extend or truncate to the result.
709 Value *Shift = Builder.CreateLShr(A, Cst->getZExtValue());
710 Shift->takeName(Src);
711 return CastInst::CreateIntegerCast(Shift, DestTy, false);
714 // FIXME: We should canonicalize to zext/trunc and remove this transform.
715 // Transform trunc(lshr (sext A), Cst) to ashr A, Cst to eliminate the type conversion.
717 // It works because bits coming from sign extension have the same value as
718 // the sign bit of the original value; performing ashr instead of lshr
719 // generates bits of the same value as the sign bit.
720 if (Src->hasOneUse() &&
721 match(Src, m_LShr(m_SExt(m_Value(A)), m_ConstantInt(Cst)))) {
722 Value *SExt = cast<Instruction>(Src)->getOperand(0);
723 const unsigned SExtSize = SExt->getType()->getPrimitiveSizeInBits();
724 const unsigned ASize = A->getType()->getPrimitiveSizeInBits();
725 const unsigned CISize = CI.getType()->getPrimitiveSizeInBits();
726 const unsigned MaxAmt = SExtSize - std::max(CISize, ASize);
727 unsigned ShiftAmt = Cst->getZExtValue();
729 // This optimization can only be performed when zero bits generated by
730 // the original lshr aren't pulled into the value after truncation, so we
731 // can only shift by values no larger than the number of extension bits.
732 // FIXME: Instead of bailing when the shift is too large, use 'and' to clear the desired bits.
734 if (ShiftAmt <= MaxAmt) {
736 return BinaryOperator::CreateAShr(A, ConstantInt::get(CI.getType(),
737 std::min(ShiftAmt, ASize - 1)));
738 if (SExt->hasOneUse()) {
739 Value *Shift = Builder.CreateAShr(A, std::min(ShiftAmt, ASize - 1));
740 Shift->takeName(Src);
741 return CastInst::CreateIntegerCast(Shift, CI.getType(), true);
746 if (Instruction *I = narrowBinOp(CI))
749 if (Instruction *I = shrinkSplatShuffle(CI, Builder))
752 if (Instruction *I = shrinkInsertElt(CI, Builder))
755 if (Src->hasOneUse() && isa<IntegerType>(SrcTy) &&
756 shouldChangeType(SrcTy, DestTy)) {
757 // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
758 // dest type is native and cst < dest size.
759 if (match(Src, m_Shl(m_Value(A), m_ConstantInt(Cst))) &&
760 !match(A, m_Shr(m_Value(), m_Constant()))) {
761 // Skip shifts of a shift by a constant. Narrowing here would undo a combine in
762 // FoldShiftByConstant and recreate the extend-in-register pattern.
763 const unsigned DestSize = DestTy->getScalarSizeInBits();
764 if (Cst->getValue().ult(DestSize)) {
765 Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");
767 return BinaryOperator::Create(
768 Instruction::Shl, NewTrunc,
769 ConstantInt::get(DestTy, Cst->getValue().trunc(DestSize)));
774 if (Instruction *I = foldVecTruncToExtElt(CI, *this))
780 Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
782 // If we are just checking for an icmp eq of a single bit and zext'ing it
783 // to an integer, then shift the bit to the appropriate place and then
784 // cast to integer to avoid the comparison.
786 if (match(ICI->getOperand(1), m_APInt(Op1CV))) {
788 // zext (x <s 0) to i32 --> x>>u31 true if signbit set.
789 // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear.
790 if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isNullValue()) ||
791 (ICI->getPredicate() == ICmpInst::ICMP_SGT && Op1CV->isAllOnesValue())) {
792 if (!DoTransform) return ICI;
794 Value *In = ICI->getOperand(0);
795 Value *Sh = ConstantInt::get(In->getType(),
796 In->getType()->getScalarSizeInBits() - 1);
797 In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
798 if (In->getType() != CI.getType())
799 In = Builder.CreateIntCast(In, CI.getType(), false /*ZExt*/);
801 if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
802 Constant *One = ConstantInt::get(In->getType(), 1);
803 In = Builder.CreateXor(In, One, In->getName() + ".not");
806 return replaceInstUsesWith(CI, In);
809 // zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
810 // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
811 // zext (X == 1) to i32 --> X iff X has only the low bit set.
812 // zext (X == 2) to i32 --> X>>1 iff X has only the 2nd bit set.
813 // zext (X != 0) to i32 --> X iff X has only the low bit set.
814 // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
815 // zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
816 // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
817 if ((Op1CV->isNullValue() || Op1CV->isPowerOf2()) &&
818 // This only works for EQ and NE
820 // If Op1C is some other power of two, convert:
821 KnownBits Known = computeKnownBits(ICI->getOperand(0), 0, &CI);
823 APInt KnownZeroMask(~Known.Zero);
824 if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
825 if (!DoTransform) return ICI;
827 bool isNE = ICI->getPredicate() == ICmpInst::ICMP_NE;
828 if (!Op1CV->isNullValue() && (*Op1CV != KnownZeroMask)) {
829 // (X&4) == 2 --> false
830 // (X&4) != 2 --> true
831 Constant *Res = ConstantInt::get(CI.getType(), isNE);
832 return replaceInstUsesWith(CI, Res);
835 uint32_t ShAmt = KnownZeroMask.logBase2();
836 Value *In = ICI->getOperand(0);
838 // Perform a logical shift right by ShAmt.
839 // Insert the shift to put the result in the low bit.
840 In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
841 In->getName() + ".lobit");
844 if (!Op1CV->isNullValue() == isNE) { // Toggle the low bit.
845 Constant *One = ConstantInt::get(In->getType(), 1);
846 In = Builder.CreateXor(In, One);
849 if (CI.getType() == In->getType())
850 return replaceInstUsesWith(CI, In);
852 Value *IntCast = Builder.CreateIntCast(In, CI.getType(), false);
853 return replaceInstUsesWith(CI, IntCast);
858 // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
859 // It is also profitable to transform icmp eq into not(xor(A, B)) because that
860 // may lead to additional simplifications.
861 if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) {
862 if (IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
863 Value *LHS = ICI->getOperand(0);
864 Value *RHS = ICI->getOperand(1);
866 KnownBits KnownLHS = computeKnownBits(LHS, 0, &CI);
867 KnownBits KnownRHS = computeKnownBits(RHS, 0, &CI);
869 if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) {
870 APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
871 APInt UnknownBit = ~KnownBits;
872 if (UnknownBit.countPopulation() == 1) {
873 if (!DoTransform) return ICI;
875 Value *Result = Builder.CreateXor(LHS, RHS);
877 // Mask off any bits that are set and won't be shifted away.
878 if (KnownLHS.One.uge(UnknownBit))
879 Result = Builder.CreateAnd(Result,
880 ConstantInt::get(ITy, UnknownBit));
882 // Shift the bit we're testing down to the lsb.
883 Result = Builder.CreateLShr(
884 Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));
886 if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
887 Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
888 Result->takeName(ICI);
889 return replaceInstUsesWith(CI, Result);
898 /// Determine if the specified value can be computed in the specified wider type
899 /// and produce the same low bits. If not, return false.
901 /// If this function returns true, it can also return a non-zero number of bits
902 /// (in BitsToClear) which indicates that the value it computes is correct for
903 /// the zero extend, but that the additional BitsToClear bits need to be zero'd
904 /// out. For example, to promote something like:
906 /// %B = trunc i64 %A to i32
907 /// %C = lshr i32 %B, 8
908 /// %E = zext i32 %C to i64
910 /// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
911 /// set to 8 to indicate that the promoted value needs to have bits 24-31
912 /// cleared in addition to bits 32-63. Since an 'and' will be generated to
913 /// clear the top bits anyway, doing this has no extra cost.
915 /// This function works on both vectors and scalars.
916 static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
917 InstCombiner &IC, Instruction *CxtI) {
919 if (isa<Constant>(V))
922 Instruction *I = dyn_cast<Instruction>(V);
923 if (!I) return false;
925 // If the input is a truncate from the destination type, we can trivially eliminate it.
927 if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
930 // We can't extend or shrink something that has multiple uses: doing so would
931 // require duplicating the instruction in general, which isn't profitable.
932 if (!I->hasOneUse()) return false;
934 unsigned Opc = I->getOpcode(), Tmp;
936 case Instruction::ZExt: // zext(zext(x)) -> zext(x).
937 case Instruction::SExt: // zext(sext(x)) -> sext(x).
938 case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
940 case Instruction::And:
941 case Instruction::Or:
942 case Instruction::Xor:
943 case Instruction::Add:
944 case Instruction::Sub:
945 case Instruction::Mul:
946 if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
947 !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
949 // These can all be promoted if neither operand has 'bits to clear'.
950 if (BitsToClear == 0 && Tmp == 0)
953 // If the operation is an AND/OR/XOR and the bits to clear are zero in the
954 // other side, BitsToClear is ok.
955 if (Tmp == 0 && I->isBitwiseLogicOp()) {
956 // We use MaskedValueIsZero here for generality, but the case we care
957 // about the most is constant RHS.
958 unsigned VSize = V->getType()->getScalarSizeInBits();
959 if (IC.MaskedValueIsZero(I->getOperand(1),
960 APInt::getHighBitsSet(VSize, BitsToClear),
962 // If this is an And instruction and all of the BitsToClear are
963 // known to be zero we can reset BitsToClear.
964 if (Opc == Instruction::And)
970 // Otherwise, we don't know how to analyze this BitsToClear case yet.
973 case Instruction::Shl: {
974 // We can promote shl(x, cst) if we can promote x. Since shl overwrites the
975 // upper bits we can reduce BitsToClear by the shift amount.
977 if (match(I->getOperand(1), m_APInt(Amt))) {
978 if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
980 uint64_t ShiftAmt = Amt->getZExtValue();
981 BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
986 case Instruction::LShr: {
987 // We can promote lshr(x, cst) if we can promote x. This requires the
988 // ultimate 'and' to clear out the high zero bits we're clearing out though.
990 if (match(I->getOperand(1), m_APInt(Amt))) {
991 if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
993 BitsToClear += Amt->getZExtValue();
994 if (BitsToClear > V->getType()->getScalarSizeInBits())
995 BitsToClear = V->getType()->getScalarSizeInBits();
998 // Cannot promote variable LSHR.
1001 case Instruction::Select:
1002 if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
1003 !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
1004 // TODO: If important, we could handle the case when the BitsToClear are
1005 // known zero in the disagreeing side.
1010 case Instruction::PHI: {
1011 // We can change a phi if we can change all operands. Note that we never
1012 // get into trouble with cyclic PHIs here because we only consider
1013 // instructions with a single use.
1014 PHINode *PN = cast<PHINode>(I);
1015 if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
1017 for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
1018 if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
1019 // TODO: If important, we could handle the case when the BitsToClear
1020 // are known zero in the disagreeing input.
1026 // TODO: Can handle more cases here.
1031 Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
1032 // If this zero extend is only used by a truncate, let the truncate be
1033 // eliminated before we try to optimize this zext.
1034 if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
1037 // If one of the common conversions will work, do it.
1038 if (Instruction *Result = commonCastTransforms(CI))
1041 Value *Src = CI.getOperand(0);
1042 Type *SrcTy = Src->getType(), *DestTy = CI.getType();
1044 // Attempt to extend the entire input expression tree to the destination
1045 // type. Only do this if the dest type is a simple type, don't convert the
1046 // expression tree to something weird like i93 unless the source is also strange.
1048 unsigned BitsToClear;
1049 if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
1050 canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
1051 assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
1052 "Can't clear more bits than in SrcTy");
1054 // Okay, we can transform this! Insert the new expression now.
1055 DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
1056 " to avoid zero extend: " << CI << '\n');
1057 Value *Res = EvaluateInDifferentType(Src, DestTy, false);
1058 assert(Res->getType() == DestTy);
1060 uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear;
1061 uint32_t DestBitSize = DestTy->getScalarSizeInBits();
1063 // If the high bits are already filled with zeros, just replace this
1064 // cast with the result.
1065 if (MaskedValueIsZero(Res,
1066 APInt::getHighBitsSet(DestBitSize,
1067 DestBitSize-SrcBitsKept),
1069 return replaceInstUsesWith(CI, Res);
1071 // We need to emit an AND to clear the high bits.
1072 Constant *C = ConstantInt::get(Res->getType(),
1073 APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
1074 return BinaryOperator::CreateAnd(Res, C);
1077 // If this is a TRUNC followed by a ZEXT then we are dealing with integral
1078 // types and if the sizes are just right we can convert this into a logical
1079 // 'and' which will be much cheaper than the pair of casts.
1080 if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
1081 // TODO: Subsume this into EvaluateInDifferentType.
1083 // Get the sizes of the types involved. We know that the intermediate type
1084 // will be smaller than A or C, but don't know the relation between A and C.
1085 Value *A = CSrc->getOperand(0);
1086 unsigned SrcSize = A->getType()->getScalarSizeInBits();
1087 unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
1088 unsigned DstSize = CI.getType()->getScalarSizeInBits();
1089 // If we're actually extending zero bits, then if
1090 // SrcSize < DstSize: zext(a & mask)
1091 // SrcSize == DstSize: a & mask
1092 // SrcSize > DstSize: trunc(a) & mask
1093 if (SrcSize < DstSize) {
1094 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
1095 Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
1096 Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
1097 return new ZExtInst(And, CI.getType());
1100 if (SrcSize == DstSize) {
1101 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
1102 return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
1105 if (SrcSize > DstSize) {
1106 Value *Trunc = Builder.CreateTrunc(A, CI.getType());
1107 APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
1108 return BinaryOperator::CreateAnd(Trunc,
1109 ConstantInt::get(Trunc->getType(),
1114 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
1115 return transformZExtICmp(ICI, CI);
1117 BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
1118 if (SrcI && SrcI->getOpcode() == Instruction::Or) {
1119 // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) if at least one
1120 // of the (zext icmp) can be eliminated. If so, immediately perform the
1121 // corresponding elimination.
1122 ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
1123 ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
1124 if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
1125 (transformZExtICmp(LHS, CI, false) ||
1126 transformZExtICmp(RHS, CI, false))) {
1127 // zext (or icmp, icmp) -> or (zext icmp), (zext icmp)
1128 Value *LCast = Builder.CreateZExt(LHS, CI.getType(), LHS->getName());
1129 Value *RCast = Builder.CreateZExt(RHS, CI.getType(), RHS->getName());
1130 BinaryOperator *Or = BinaryOperator::Create(Instruction::Or, LCast, RCast);
1132 // Perform the elimination.
1133 if (auto *LZExt = dyn_cast<ZExtInst>(LCast))
1134 transformZExtICmp(LHS, *LZExt);
1135 if (auto *RZExt = dyn_cast<ZExtInst>(RCast))
1136 transformZExtICmp(RHS, *RZExt);
1142 // zext(trunc(X) & C) -> (X & zext(C)).
1146 match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
1147 X->getType() == CI.getType())
1148 return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));
1150 // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
1152 if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
1153 match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
1154 X->getType() == CI.getType()) {
1155 Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
1156 return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
1162 /// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
1163 Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
1164 Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
1165 ICmpInst::Predicate Pred = ICI->getPredicate();
1167 // Don't bother if Op1 isn't of vector or integer type.
1168 if (!Op1->getType()->isIntOrIntVectorTy())
1171 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
1172 // (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if negative
1173 // (x >s -1) ? -1 : 0 -> not (ashr x, 31) -> all ones if positive
1174 if ((Pred == ICmpInst::ICMP_SLT && Op1C->isNullValue()) ||
1175 (Pred == ICmpInst::ICMP_SGT && Op1C->isAllOnesValue())) {
1177 Value *Sh = ConstantInt::get(Op0->getType(),
1178 Op0->getType()->getScalarSizeInBits()-1);
1179 Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
1180 if (In->getType() != CI.getType())
1181 In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);
1183 if (Pred == ICmpInst::ICMP_SGT)
1184 In = Builder.CreateNot(In, In->getName() + ".not");
1185 return replaceInstUsesWith(CI, In);
1189 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
1190 // If we know that only one bit of the LHS of the icmp can be set and we
1191 // have an equality comparison with zero or a power of 2, we can transform
1192 // the icmp and sext into bitwise/integer operations.
1193 if (ICI->hasOneUse() &&
1194 ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
1195 KnownBits Known = computeKnownBits(Op0, 0, &CI);
1197 APInt KnownZeroMask(~Known.Zero);
1198 if (KnownZeroMask.isPowerOf2()) {
1199 Value *In = ICI->getOperand(0);
1201 // If the icmp tests for a known zero bit we can constant fold it.
1202 if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
1203 Value *V = Pred == ICmpInst::ICMP_NE ?
1204 ConstantInt::getAllOnesValue(CI.getType()) :
1205 ConstantInt::getNullValue(CI.getType());
1206 return replaceInstUsesWith(CI, V);
1209 if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
1210 // sext ((x & 2^n) == 0) -> (x >> n) - 1
1211 // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
1212 unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
1213 // Perform a right shift to place the desired bit in the LSB.
1215 In = Builder.CreateLShr(In,
1216 ConstantInt::get(In->getType(), ShiftAmt));
1218 // At this point "In" is either 1 or 0. Subtract 1 to turn
1219 // {1, 0} -> {0, -1}.
1220 In = Builder.CreateAdd(In,
1221 ConstantInt::getAllOnesValue(In->getType()),
1224 // sext ((x & 2^n) != 0) -> (x << bitwidth-n) a>> bitwidth-1
1225 // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
1226 unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
1227 // Perform a left shift to place the desired bit in the MSB.
1229 In = Builder.CreateShl(In,
1230 ConstantInt::get(In->getType(), ShiftAmt));
1232 // Distribute the bit over the whole bit width.
1233 In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
1234 KnownZeroMask.getBitWidth() - 1), "sext");
1237 if (CI.getType() == In->getType())
1238 return replaceInstUsesWith(CI, In);
1239 return CastInst::CreateIntegerCast(In, CI.getType(), true/*SExt*/);
1247 /// Return true if we can take the specified value and return it as type Ty
1248 /// without inserting any new casts and without changing the value of the common
1249 /// low bits. This is used by code that tries to promote integer operations to
1250 /// a wider type, which will allow us to eliminate the extension.
1252 /// This function works on both vectors and scalars.
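/// For example (illustrative IR), in
///   %t = trunc i32 %x to i8
///   %a = add i8 %t, 1
///   %s = sext i8 %a to i32
/// the add can be evaluated directly in i32 as 'add i32 %x, 1'; the caller then
/// re-establishes the sign bits (via shl/ashr) only if they are not already known.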
1254 static bool canEvaluateSExtd(Value *V, Type *Ty) {
1255 assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
1256 "Can't sign extend type to a smaller type");
1257 // If this is a constant, it can be trivially promoted.
1258 if (isa<Constant>(V))
1261 Instruction *I = dyn_cast<Instruction>(V);
1262 if (!I) return false;
1264 // If this is a truncate from the dest type, we can trivially eliminate it.
1265 if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
1268 // We can't extend or shrink something that has multiple uses: doing so would
1269 // require duplicating the instruction in general, which isn't profitable.
1270 if (!I->hasOneUse()) return false;
1272 switch (I->getOpcode()) {
1273 case Instruction::SExt: // sext(sext(x)) -> sext(x)
1274 case Instruction::ZExt: // sext(zext(x)) -> zext(x)
1275 case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
1277 case Instruction::And:
1278 case Instruction::Or:
1279 case Instruction::Xor:
1280 case Instruction::Add:
1281 case Instruction::Sub:
1282 case Instruction::Mul:
1283 // These operators can all arbitrarily be extended if their inputs can.
1284 return canEvaluateSExtd(I->getOperand(0), Ty) &&
1285 canEvaluateSExtd(I->getOperand(1), Ty);
1287 //case Instruction::Shl: TODO
1288 //case Instruction::LShr: TODO
1290 case Instruction::Select:
1291 return canEvaluateSExtd(I->getOperand(1), Ty) &&
1292 canEvaluateSExtd(I->getOperand(2), Ty);
1294 case Instruction::PHI: {
1295 // We can change a phi if we can change all operands. Note that we never
1296 // get into trouble with cyclic PHIs here because we only consider
1297 // instructions with a single use.
1298 PHINode *PN = cast<PHINode>(I);
1299 for (Value *IncValue : PN->incoming_values())
1300 if (!canEvaluateSExtd(IncValue, Ty)) return false;
1304 // TODO: Can handle more cases here.
1311 Instruction *InstCombiner::visitSExt(SExtInst &CI) {
1312 // If this sign extend is only used by a truncate, let the truncate be
1313 // eliminated before we try to optimize this sext.
1314 if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
1317 if (Instruction *I = commonCastTransforms(CI))
1320 Value *Src = CI.getOperand(0);
1321 Type *SrcTy = Src->getType(), *DestTy = CI.getType();
1323 // If we know that the value being extended is positive, we can use a zext instead.
1325 KnownBits Known = computeKnownBits(Src, 0, &CI);
1326 if (Known.isNonNegative()) {
1327 Value *ZExt = Builder.CreateZExt(Src, DestTy);
1328 return replaceInstUsesWith(CI, ZExt);
1331 // Attempt to extend the entire input expression tree to the destination
1332 // type. Only do this if the dest type is a simple type, don't convert the
1333 // expression tree to something weird like i93 unless the source is also strange.
1335 if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
1336 canEvaluateSExtd(Src, DestTy)) {
1337 // Okay, we can transform this! Insert the new expression now.
1338 DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
1339 " to avoid sign extend: " << CI << '\n');
1340 Value *Res = EvaluateInDifferentType(Src, DestTy, true);
1341 assert(Res->getType() == DestTy);
1343 uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
1344 uint32_t DestBitSize = DestTy->getScalarSizeInBits();
1346 // If the high bits are already filled with sign bit, just replace this
1347 // cast with the result.
1348 if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
1349 return replaceInstUsesWith(CI, Res);
1351 // We need to emit a shl + ashr to do the sign extend.
1352 Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
1353 return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
1357 // If the input is a trunc from the destination type, then turn sext(trunc(x)) into shifts.
1360 if (match(Src, m_OneUse(m_Trunc(m_Value(X)))) && X->getType() == DestTy) {
1361 // sext(trunc(X)) --> ashr(shl(X, C), C)
1362 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
1363 unsigned DestBitSize = DestTy->getScalarSizeInBits();
1364 Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
1365 return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
1368 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
1369 return transformSExtICmp(ICI, CI);
1371 // If the input is a shl/ashr pair of a same constant, then this is a sign
1372 // extension from a smaller value. If we could trust arbitrary bitwidth
1373 // integers, we could turn this into a truncate to the smaller bit and then
1374 // use a sext for the whole extension. Since we don't, look deeper and check
1375 // for a truncate. If the source and dest are the same type, eliminate the
1376 // trunc and extend and just do shifts. For example, turn:
1377 // %a = trunc i32 %i to i8
1378 // %b = shl i8 %a, 6
1379 // %c = ashr i8 %b, 6
1380 // %d = sext i8 %c to i32
1382 // %a = shl i32 %i, 30
1383 // %d = ashr i32 %a, 30
1385 // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
1386 ConstantInt *BA = nullptr, *CA = nullptr;
1387 if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_ConstantInt(BA)),
1388 m_ConstantInt(CA))) &&
1389 BA == CA && A->getType() == CI.getType()) {
1390 unsigned MidSize = Src->getType()->getScalarSizeInBits();
1391 unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
1392 unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
1393 Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
1394 A = Builder.CreateShl(A, ShAmtV, CI.getName());
1395 return BinaryOperator::CreateAShr(A, ShAmtV);
1402 /// Return a Constant* for the specified floating-point constant if it fits
1403 /// in the specified FP type without changing its value.
1404 static Constant *fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
1406 APFloat F = CFP->getValueAPF();
1407 (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
1409 return ConstantFP::get(CFP->getContext(), F);
1413 /// Look through floating-point extensions until we get the source value.
1414 static Value *lookThroughFPExtensions(Value *V) {
1415 while (auto *FPExt = dyn_cast<FPExtInst>(V))
1416 V = FPExt->getOperand(0);
1418 // If this value is a constant, return the constant in the smallest FP type
1419 // that can accurately represent it. This allows us to turn
1420 // (float)((double)X+2.0) into x+2.0f.
1421 if (auto *CFP = dyn_cast<ConstantFP>(V)) {
1422 if (CFP->getType() == Type::getPPC_FP128Ty(V->getContext()))
1423 return V; // No constant folding of this.
1424 // See if the value can be truncated to half and then reextended.
1425 if (Value *V = fitsInFPType(CFP, APFloat::IEEEhalf()))
1427 // See if the value can be truncated to float and then reextended.
1428 if (Value *V = fitsInFPType(CFP, APFloat::IEEEsingle()))
1430 if (CFP->getType()->isDoubleTy())
1431 return V; // Won't shrink.
1432 if (Value *V = fitsInFPType(CFP, APFloat::IEEEdouble()))
1434 // Don't try to shrink to various long double types.
1440 Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
1441 if (Instruction *I = commonCastTransforms(CI))
1443 // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
1444 // simplify this expression to avoid one or more of the trunc/extend
1445 // operations if we can do so without changing the numerical results.
1447 // The exact manner in which the widths of the operands interact to limit
1448 // what we can and cannot do safely varies from operation to operation, and
1449 // is explained below in the various case statements.
1450 BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
1451 if (OpI && OpI->hasOneUse()) {
1452 Value *LHSOrig = lookThroughFPExtensions(OpI->getOperand(0));
1453 Value *RHSOrig = lookThroughFPExtensions(OpI->getOperand(1));
1454 unsigned OpWidth = OpI->getType()->getFPMantissaWidth();
1455 unsigned LHSWidth = LHSOrig->getType()->getFPMantissaWidth();
1456 unsigned RHSWidth = RHSOrig->getType()->getFPMantissaWidth();
1457 unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
1458 unsigned DstWidth = CI.getType()->getFPMantissaWidth();
1459 switch (OpI->getOpcode()) {
1461 case Instruction::FAdd:
1462 case Instruction::FSub:
1463 // For addition and subtraction, the infinitely precise result can
1464 // essentially be arbitrarily wide; proving that double rounding
1465 // will not occur because the result of OpI is exact (as we will for
1466 // FMul, for example) is hopeless. However, we *can* nonetheless
1467 // frequently know that double rounding cannot occur (or that it is
1468 // innocuous) by taking advantage of the specific structure of
1469 // infinitely-precise results that admit double rounding.
1471 // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
1472 // to represent both sources, we can guarantee that the double
1473 // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
1474 // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
1475 // for proof of this fact).
1477 // Note: Figueroa does not consider the case where DstFormat !=
1478 // SrcFormat. It's possible (likely even!) that this analysis
1479 // could be tightened for those cases, but they are rare (the main
1480 // case of interest here is (float)((double)float + float)).
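// As a concrete (illustrative) instance: for (float)((double)a + (double)b)
// with float a and b, OpWidth = 53, DstWidth = SrcWidth = 24, and
// 53 >= 2*24+1, so the addition below is performed directly in float.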
1481 if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
1482 if (LHSOrig->getType() != CI.getType())
1483 LHSOrig = Builder.CreateFPExt(LHSOrig, CI.getType());
1484 if (RHSOrig->getType() != CI.getType())
1485 RHSOrig = Builder.CreateFPExt(RHSOrig, CI.getType());
1487 BinaryOperator::Create(OpI->getOpcode(), LHSOrig, RHSOrig);
1488 RI->copyFastMathFlags(OpI);
1492 case Instruction::FMul:
1493 // For multiplication, the infinitely precise result has at most
1494 // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
1495 // that such a value can be exactly represented, then no double
1496 // rounding can possibly occur; we can safely perform the operation
1497 // in the destination format if it can represent both sources.
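// A worked (illustrative) instance: multiplying two half values (11-bit
// significands) that were fpext'd to double, with the product fptrunc'd to
// float: OpWidth = 53 >= 11+11 and DstWidth = 24 >= 11, so the fmul can be
// done directly in float.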
1498 if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
1499 if (LHSOrig->getType() != CI.getType())
1500 LHSOrig = Builder.CreateFPExt(LHSOrig, CI.getType());
1501 if (RHSOrig->getType() != CI.getType())
1502 RHSOrig = Builder.CreateFPExt(RHSOrig, CI.getType());
1504 BinaryOperator::CreateFMul(LHSOrig, RHSOrig);
1505 RI->copyFastMathFlags(OpI);
1509 case Instruction::FDiv:
1510 // For division, we again use the bound from Figueroa's
1511 // dissertation. I am entirely certain that this bound can be
1512 // tightened in the unbalanced operand case by an analysis based on
1513 // the diophantine rational approximation bound, but the well-known
1514 // condition used here is a good conservative first pass.
1515 // TODO: Tighten bound via rigorous analysis of the unbalanced case.
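// Illustrative example: in
//   fptrunc (fdiv (fpext float %a to double), (fpext float %b to double)) to float
// OpWidth is 53 >= 2*24 and DstWidth 24 covers both sources, so it is safe
// to emit: fdiv float %a, %b.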
1516 if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
1517 if (LHSOrig->getType() != CI.getType())
1518 LHSOrig = Builder.CreateFPExt(LHSOrig, CI.getType());
1519 if (RHSOrig->getType() != CI.getType())
1520 RHSOrig = Builder.CreateFPExt(RHSOrig, CI.getType());
1521 Instruction *RI =
1522 BinaryOperator::CreateFDiv(LHSOrig, RHSOrig);
1523 RI->copyFastMathFlags(OpI);
1524 return RI;
1525 }
1526 break;
1527 case Instruction::FRem:
1528 // Remainder is straightforward. Remainder is always exact, so the
1529 // type of OpI doesn't enter into things at all. We simply evaluate
1530 // in whichever source type is larger, then convert to the
1531 // destination type.
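// Illustrative example:
//   fptrunc (frem (fpext half %a to double), (fpext float %b to double)) to half
// can be evaluated in float (the larger of the two source types) as
//   fptrunc (frem (fpext half %a to float), float %b) to half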
1532 if (SrcWidth == OpWidth)
1533 break;
1534 if (LHSWidth < SrcWidth)
1535 LHSOrig = Builder.CreateFPExt(LHSOrig, RHSOrig->getType());
1536 else if (RHSWidth <= SrcWidth)
1537 RHSOrig = Builder.CreateFPExt(RHSOrig, LHSOrig->getType());
1538 if (LHSOrig != OpI->getOperand(0) || RHSOrig != OpI->getOperand(1)) {
1539 Value *ExactResult = Builder.CreateFRem(LHSOrig, RHSOrig);
1540 if (Instruction *RI = dyn_cast<Instruction>(ExactResult))
1541 RI->copyFastMathFlags(OpI);
1542 return CastInst::CreateFPCast(ExactResult, CI.getType());
1543 }
1544 }
1546 // (fptrunc (fneg x)) -> (fneg (fptrunc x))
1547 if (BinaryOperator::isFNeg(OpI)) {
1548 Value *InnerTrunc = Builder.CreateFPTrunc(OpI->getOperand(1),
1549 CI.getType());
1550 Instruction *RI = BinaryOperator::CreateFNeg(InnerTrunc);
1551 RI->copyFastMathFlags(OpI);
1552 return RI;
1553 }
1554 }
1556 // (fptrunc (select cond, R1, Cst)) -->
1557 // (select cond, (fptrunc R1), (fptrunc Cst))
1559 // - but only if this isn't part of a min/max operation, else we'll
1560 // ruin min/max canonical form which is to have the select and
1561 // compare's operands be of the same type with no casts to look through.
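// Illustrative example (the truncated constant operand folds away):
//   fptrunc (select i1 %c, double %x, double 1.0) to float
//     --> select i1 %c, (fptrunc double %x to float), float 1.0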
1562 Value *LHS, *RHS;
1563 SelectInst *SI = dyn_cast<SelectInst>(CI.getOperand(0));
1564 if (SI &&
1565 (isa<ConstantFP>(SI->getOperand(1)) ||
1566 isa<ConstantFP>(SI->getOperand(2))) &&
1567 matchSelectPattern(SI, LHS, RHS).Flavor == SPF_UNKNOWN) {
1568 Value *LHSTrunc = Builder.CreateFPTrunc(SI->getOperand(1), CI.getType());
1569 Value *RHSTrunc = Builder.CreateFPTrunc(SI->getOperand(2), CI.getType());
1570 return SelectInst::Create(SI->getOperand(0), LHSTrunc, RHSTrunc);
1571 }
1573 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI.getOperand(0));
1574 if (II) {
1575 switch (II->getIntrinsicID()) {
1576 default: break;
1577 case Intrinsic::fabs:
1578 case Intrinsic::ceil:
1579 case Intrinsic::floor:
1580 case Intrinsic::rint:
1581 case Intrinsic::round:
1582 case Intrinsic::nearbyint:
1583 case Intrinsic::trunc: {
1584 Value *Src = II->getArgOperand(0);
1585 if (!Src->hasOneUse())
1586 break;
1588 // Except for fabs, this transformation requires the input of the unary FP
1589 // operation to be itself an fpext from the type to which we're
1590 // truncating.
1591 if (II->getIntrinsicID() != Intrinsic::fabs) {
1592 FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
1593 if (!FPExtSrc || FPExtSrc->getOperand(0)->getType() != CI.getType())
1594 break;
1595 }
1597 // Do unary FP operation on smaller type.
1598 // (fptrunc (fabs x)) -> (fabs (fptrunc x))
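// For the other intrinsics above the operand must itself be an fpext from
// the destination type; e.g. (illustrative):
//   (fptrunc (ceil (fpext x))) -> (ceil (fptrunc (fpext x))) -> (ceil x)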
1599 Value *InnerTrunc = Builder.CreateFPTrunc(Src, CI.getType());
1600 Type *IntrinsicType[] = { CI.getType() };
1601 Function *Overload = Intrinsic::getDeclaration(
1602 CI.getModule(), II->getIntrinsicID(), IntrinsicType);
1604 SmallVector<OperandBundleDef, 1> OpBundles;
1605 II->getOperandBundlesAsDefs(OpBundles);
1607 Value *Args[] = { InnerTrunc };
1608 CallInst *NewCI = CallInst::Create(Overload, Args,
1609 OpBundles, II->getName());
1610 NewCI->copyFastMathFlags(II);
1611 return NewCI;
1612 }
1613 }
1614 }
1616 if (Instruction *I = shrinkInsertElt(CI, Builder))
1617 return I;
1619 return nullptr;
1620 }
1622 Instruction *InstCombiner::visitFPExt(CastInst &CI) {
1623 return commonCastTransforms(CI);
1624 }
1626 // fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
1627 // This is safe if the intermediate type has enough bits in its mantissa to
1628 // accurately represent all values of X. For example, this won't work with
1629 // i64 -> float -> i64.
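// For example (illustrative): fptosi (sitofp i16 %x to float) to i32 can
// become sext i16 %x to i32, because float's 24-bit mantissa represents
// every i16 value exactly.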
1630 Instruction *InstCombiner::FoldItoFPtoI(Instruction &FI) {
1631 if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
1632 return nullptr;
1633 Instruction *OpI = cast<Instruction>(FI.getOperand(0));
1635 Value *SrcI = OpI->getOperand(0);
1636 Type *FITy = FI.getType();
1637 Type *OpITy = OpI->getType();
1638 Type *SrcTy = SrcI->getType();
1639 bool IsInputSigned = isa<SIToFPInst>(OpI);
1640 bool IsOutputSigned = isa<FPToSIInst>(FI);
1642 // We can safely assume the conversion won't overflow the output range,
1643 // because (for example) (uint8_t)18293.f is undefined behavior.
1645 // Since we can assume the conversion won't overflow, our decision as to
1646 // whether the input will fit in the float should depend on the minimum
1647 // of the input range and output range.
1649 // This means this is also safe for a signed input and unsigned output, since
1650 // a negative input would lead to undefined behavior.
1651 int InputSize = (int)SrcTy->getScalarSizeInBits() - IsInputSigned;
1652 int OutputSize = (int)FITy->getScalarSizeInBits() - IsOutputSigned;
1653 int ActualSize = std::min(InputSize, OutputSize);
1655 if (ActualSize <= OpITy->getFPMantissaWidth()) {
1656 if (FITy->getScalarSizeInBits() > SrcTy->getScalarSizeInBits()) {
1657 if (IsInputSigned && IsOutputSigned)
1658 return new SExtInst(SrcI, FITy);
1659 return new ZExtInst(SrcI, FITy);
1660 }
1661 if (FITy->getScalarSizeInBits() < SrcTy->getScalarSizeInBits())
1662 return new TruncInst(SrcI, FITy);
1663 if (SrcTy == FITy)
1664 return replaceInstUsesWith(FI, SrcI);
1665 return new BitCastInst(SrcI, FITy);
1666 }
1667 return nullptr;
1668 }
1670 Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
1671 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
1672 if (!OpI)
1673 return commonCastTransforms(FI);
1675 if (Instruction *I = FoldItoFPtoI(FI))
1676 return I;
1678 return commonCastTransforms(FI);
1679 }
1681 Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
1682 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
1683 if (!OpI)
1684 return commonCastTransforms(FI);
1686 if (Instruction *I = FoldItoFPtoI(FI))
1687 return I;
1689 return commonCastTransforms(FI);
1690 }
1692 Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
1693 return commonCastTransforms(CI);
1694 }
1696 Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
1697 return commonCastTransforms(CI);
1698 }
1700 Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
1701 // If the source integer type is not the intptr_t type for this target, do a
1702 // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
1703 // cast to be exposed to other transforms.
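// For example (illustrative, assuming 64-bit pointers in this address space):
//   inttoptr i32 %x to i8*
// becomes
//   %w = zext i32 %x to i64
//   inttoptr i64 %w to i8*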
1704 unsigned AS = CI.getAddressSpace();
1705 if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
1706 DL.getPointerSizeInBits(AS)) {
1707 Type *Ty = DL.getIntPtrType(CI.getContext(), AS);
1708 if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
1709 Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());
1711 Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
1712 return new IntToPtrInst(P, CI.getType());
1713 }
1715 if (Instruction *I = commonCastTransforms(CI))
1716 return I;
1718 return nullptr;
1719 }
1721 /// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
1722 Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
1723 Value *Src = CI.getOperand(0);
1725 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
1726 // If casting the result of a getelementptr instruction with no offset, turn
1727 // this into a cast of the original pointer!
1728 if (GEP->hasAllZeroIndices() &&
1729 // If CI is an addrspacecast and GEP changes the pointer type, merging
1730 // GEP into CI would undo canonicalizing addrspacecast with different
1731 // pointer types, causing infinite loops.
1732 (!isa<AddrSpaceCastInst>(CI) ||
1733 GEP->getType() == GEP->getPointerOperandType())) {
1734 // Changing the cast operand is usually not a good idea but it is safe
1735 // here because the pointer operand is being replaced with another
1736 // pointer operand so the opcode doesn't need to change.
1737 Worklist.Add(GEP);
1738 CI.setOperand(0, GEP->getOperand(0));
1739 return &CI;
1740 }
1741 }
1743 return commonCastTransforms(CI);
1744 }
1746 Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
1747 // If the destination integer type is not the intptr_t type for this target,
1748 // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
1749 // to be exposed to other transforms.
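// For example (illustrative, with 64-bit pointers):
//   ptrtoint i8* %p to i32
// becomes
//   %w = ptrtoint i8* %p to i64
//   trunc i64 %w to i32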
1751 Type *Ty = CI.getType();
1752 unsigned AS = CI.getPointerAddressSpace();
1754 if (Ty->getScalarSizeInBits() == DL.getPointerSizeInBits(AS))
1755 return commonPointerCastTransforms(CI);
1757 Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);
1758 if (Ty->isVectorTy()) // Handle vectors of pointers.
1759 PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());
1761 Value *P = Builder.CreatePtrToInt(CI.getOperand(0), PtrTy);
1762 return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
1763 }
1765 /// This input value (which is known to have vector type) is being zero extended
1766 /// or truncated to the specified vector type.
1767 /// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
1769 /// The source and destination vector types may have different element types.
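/// For example (illustrative): widening <2 x i32> to <4 x i32> can be done as
///   shufflevector <2 x i32> %v, <2 x i32> zeroinitializer,
///                 <4 x i32> <i32 0, i32 1, i32 2, i32 2>
/// where indices past the source elements all read the first zero element.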
1770 static Instruction *optimizeVectorResize(Value *InVal, VectorType *DestTy,
1771 InstCombiner &IC) {
1772 // We can only do this optimization if the output is a multiple of the input
1773 // element size, or the input is a multiple of the output element size.
1774 // Convert the input type to have the same element type as the output.
1775 VectorType *SrcTy = cast<VectorType>(InVal->getType());
1777 if (SrcTy->getElementType() != DestTy->getElementType()) {
1778 // The input types don't need to be identical, but for now they must be the
1779 // same size. There is no specific reason we couldn't handle things like
1780 // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
1781 // around to it yet.
1782 if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
1783 DestTy->getElementType()->getPrimitiveSizeInBits())
1784 return nullptr;
1786 SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
1787 InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
1788 }
1790 // Now that the element types match, get the shuffle mask and RHS of the
1791 // shuffle to use, which depends on whether we're increasing or decreasing the
1792 // size of the input.
1793 SmallVector<uint32_t, 16> ShuffleMask;
1794 Value *V2;
1796 if (SrcTy->getNumElements() > DestTy->getNumElements()) {
1797 // If we're shrinking the number of elements, just shuffle in the low
1798 // elements from the input and use undef as the second shuffle input.
1799 V2 = UndefValue::get(SrcTy);
1800 for (unsigned i = 0, e = DestTy->getNumElements(); i != e; ++i)
1801 ShuffleMask.push_back(i);
1803 } else {
1804 // If we're increasing the number of elements, shuffle in all of the
1805 // elements from InVal and fill the rest of the result elements with zeros
1806 // from a constant zero.
1807 V2 = Constant::getNullValue(SrcTy);
1808 unsigned SrcElts = SrcTy->getNumElements();
1809 for (unsigned i = 0, e = SrcElts; i != e; ++i)
1810 ShuffleMask.push_back(i);
1812 // The excess elements reference the first element of the zero input.
1813 for (unsigned i = 0, e = DestTy->getNumElements()-SrcElts; i != e; ++i)
1814 ShuffleMask.push_back(SrcElts);
1815 }
1817 return new ShuffleVectorInst(InVal, V2,
1818 ConstantDataVector::get(V2->getContext(),
1819 ShuffleMask));
1820 }
1822 static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
1823 return Value % Ty->getPrimitiveSizeInBits() == 0;
1824 }
1826 static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
1827 return Value / Ty->getPrimitiveSizeInBits();
1828 }
1830 /// V is a value which is inserted into a vector of VecEltTy.
1831 /// Look through the value to see if we can decompose it into
1832 /// insertions into the vector. See the example in the comment for
1833 /// OptimizeIntegerToVectorInsertions for the pattern this handles.
1834 /// The type of V is always a non-zero multiple of VecEltTy's size.
1835 /// Shift is the number of bits between the lsb of V and the lsb of
1836 /// the vector.
1838 /// This returns false if the pattern can't be matched or true if it can,
1839 /// filling in Elements with the elements found here.
1840 static bool collectInsertionElements(Value *V, unsigned Shift,
1841 SmallVectorImpl<Value *> &Elements,
1842 Type *VecEltTy, bool isBigEndian) {
1843 assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
1844 "Shift should be a multiple of the element type size");
1846 // Undef values never contribute useful bits to the result.
1847 if (isa<UndefValue>(V)) return true;
1849 // If we got down to a value of the right type, we win, try inserting into the
1850 // vector.
1851 if (V->getType() == VecEltTy) {
1852 // Inserting null doesn't actually insert any elements.
1853 if (Constant *C = dyn_cast<Constant>(V))
1854 if (C->isNullValue())
1855 return true;
1857 unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
1858 if (isBigEndian)
1859 ElementIndex = Elements.size() - ElementIndex - 1;
1861 // Fail if multiple elements are inserted into this slot.
1862 if (Elements[ElementIndex])
1863 return false;
1865 Elements[ElementIndex] = V;
1866 return true;
1867 }
1869 if (Constant *C = dyn_cast<Constant>(V)) {
1870 // Figure out the # elements this provides, and bitcast it or slice it up
1871 // as required.
1872 unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
1873 VecEltTy);
1874 // If the constant is the size of a vector element, we just need to bitcast
1875 // it to the right type so it gets properly inserted.
1876 if (NumElts == 1)
1877 return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
1878 Shift, Elements, VecEltTy, isBigEndian);
1880 // Okay, this is a constant that covers multiple elements. Slice it up into
1881 // pieces and insert each element-sized piece into the vector.
1882 if (!isa<IntegerType>(C->getType()))
1883 C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
1884 C->getType()->getPrimitiveSizeInBits()));
1885 unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
1886 Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
1888 for (unsigned i = 0; i != NumElts; ++i) {
1889 unsigned ShiftI = Shift+i*ElementSize;
1890 Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
1891 ShiftI));
1892 Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
1893 if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
1894 isBigEndian))
1895 return false;
1896 }
1897 return true;
1898 }
1900 if (!V->hasOneUse()) return false;
1902 Instruction *I = dyn_cast<Instruction>(V);
1903 if (!I) return false;
1904 switch (I->getOpcode()) {
1905 default: return false; // Unhandled case.
1906 case Instruction::BitCast:
1907 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
1908 isBigEndian);
1909 case Instruction::ZExt:
1910 if (!isMultipleOfTypeSize(
1911 I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
1912 VecEltTy))
1913 return false;
1914 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
1915 isBigEndian);
1916 case Instruction::Or:
1917 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
1918 isBigEndian) &&
1919 collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
1920 isBigEndian);
1921 case Instruction::Shl: {
1922 // Must be shifting by a constant that is a multiple of the element size.
1923 ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
1924 if (!CI) return false;
1925 Shift += CI->getZExtValue();
1926 if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
1927 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
1928 isBigEndian);
1929 }
1931 }
1932 }
1935 /// If the input is an 'or' instruction, we may be doing shifts and ors to
1936 /// assemble the elements of the vector manually.
1937 /// Try to rip the code out and replace it with insertelements. This is to
1938 /// optimize code like this:
1940 /// %tmp37 = bitcast float %inc to i32
1941 /// %tmp38 = zext i32 %tmp37 to i64
1942 /// %tmp31 = bitcast float %inc5 to i32
1943 /// %tmp32 = zext i32 %tmp31 to i64
1944 /// %tmp33 = shl i64 %tmp32, 32
1945 /// %ins35 = or i64 %tmp33, %tmp38
1946 /// %tmp43 = bitcast i64 %ins35 to <2 x float>
1948 /// Into two insertelements that do "buildvector{%inc, %inc5}".
1949 static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
1950 InstCombiner &IC) {
1951 VectorType *DestVecTy = cast<VectorType>(CI.getType());
1952 Value *IntInput = CI.getOperand(0);
1954 SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
1955 if (!collectInsertionElements(IntInput, 0, Elements,
1956 DestVecTy->getElementType(),
1957 IC.getDataLayout().isBigEndian()))
1958 return nullptr;
1960 // If we succeeded, we know that all of the element are specified by Elements
1961 // or are zero if Elements has a null entry. Recast this as a set of
1962 // insertions.
1963 Value *Result = Constant::getNullValue(CI.getType());
1964 for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
1965 if (!Elements[i]) continue; // Unset element.
1967 Result = IC.Builder.CreateInsertElement(Result, Elements[i],
1968 IC.Builder.getInt32(i));
1969 }
1971 return Result;
1972 }
1974 /// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
1975 /// vector followed by extract element. The backend tends to handle bitcasts of
1976 /// vectors better than bitcasts of scalars because vector registers are
1977 /// usually not type-specific like scalar integer or scalar floating-point.
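/// For example (illustrative):
///   bitcast (extractelement <2 x float> %v, i32 0) to i32
///     --> extractelement (bitcast <2 x float> %v to <2 x i32>), i32 0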
1978 static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
1979 InstCombiner &IC) {
1980 // TODO: Create and use a pattern matcher for ExtractElementInst.
1981 auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
1982 if (!ExtElt || !ExtElt->hasOneUse())
1983 return nullptr;
1985 // The bitcast must be to a vectorizable type, otherwise we can't make a new
1986 // type to extract from.
1987 Type *DestType = BitCast.getType();
1988 if (!VectorType::isValidElementType(DestType))
1989 return nullptr;
1991 unsigned NumElts = ExtElt->getVectorOperandType()->getNumElements();
1992 auto *NewVecType = VectorType::get(DestType, NumElts);
1993 auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
1994 NewVecType);
1995 return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
1996 }
1998 /// Change the type of a bitwise logic operation if we can eliminate a bitcast.
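/// For example (illustrative):
///   bitcast (xor (bitcast <4 x i32> %x to <2 x i64>), %y) to <4 x i32>
///     --> xor <4 x i32> %x, (bitcast <2 x i64> %y to <4 x i32>)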
1999 static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
2000 InstCombiner::BuilderTy &Builder) {
2001 Type *DestTy = BitCast.getType();
2002 BinaryOperator *BO;
2003 if (!DestTy->isIntOrIntVectorTy() ||
2004 !match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
2005 !BO->isBitwiseLogicOp())
2006 return nullptr;
2008 // FIXME: This transform is restricted to vector types to avoid backend
2009 // problems caused by creating potentially illegal operations. If a fix-up is
2010 // added to handle that situation, we can remove this check.
2011 if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
2012 return nullptr;
2014 Value *X;
2015 if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
2016 X->getType() == DestTy && !isa<Constant>(X)) {
2017 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
2018 Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
2019 return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
2020 }
2022 if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
2023 X->getType() == DestTy && !isa<Constant>(X)) {
2024 // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
2025 Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
2026 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
2027 }
2029 // Canonicalize vector bitcasts to come before vector bitwise logic with a
2030 // constant. This eases recognition of special constants for later ops.
2031 // Example:
2032 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
2033 Constant *C;
2034 if (match(BO->getOperand(1), m_Constant(C))) {
2035 // bitcast (logic X, C) --> logic (bitcast X, C')
2036 Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
2037 Value *CastedC = ConstantExpr::getBitCast(C, DestTy);
2038 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
2039 }
2041 return nullptr;
2042 }
2044 /// Change the type of a select if we can eliminate a bitcast.
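/// For example (illustrative):
///   bitcast (select i1 %c, (bitcast <4 x i32> %x to <2 x i64>), %y) to <4 x i32>
///     --> select i1 %c, <4 x i32> %x, (bitcast <2 x i64> %y to <4 x i32>)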
2045 static Instruction *foldBitCastSelect(BitCastInst &BitCast,
2046 InstCombiner::BuilderTy &Builder) {
2047 Value *Cond, *TVal, *FVal;
2048 if (!match(BitCast.getOperand(0),
2049 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
2050 return nullptr;
2052 // A vector select must maintain the same number of elements in its operands.
2053 Type *CondTy = Cond->getType();
2054 Type *DestTy = BitCast.getType();
2055 if (CondTy->isVectorTy()) {
2056 if (!DestTy->isVectorTy())
2057 return nullptr;
2058 if (DestTy->getVectorNumElements() != CondTy->getVectorNumElements())
2059 return nullptr;
2060 }
2062 // FIXME: This transform is restricted from changing the select between
2063 // scalars and vectors to avoid backend problems caused by creating
2064 // potentially illegal operations. If a fix-up is added to handle that
2065 // situation, we can remove this check.
2066 if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
2067 return nullptr;
2069 auto *Sel = cast<Instruction>(BitCast.getOperand(0));
2070 Value *X;
2071 if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2072 !isa<Constant>(X)) {
2073 // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
2074 Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
2075 return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
2076 }
2078 if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2079 !isa<Constant>(X)) {
2080 // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
2081 Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
2082 return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
2083 }
2085 return nullptr;
2086 }
2088 /// Check if all users of CI are StoreInsts.
2089 static bool hasStoreUsersOnly(CastInst &CI) {
2090 for (User *U : CI.users()) {
2091 if (!isa<StoreInst>(U))
2092 return false;
2093 }
2094 return true;
2095 }
2097 /// This function handles the following case:
2099 ///     A  ->  B    cast
2100 ///     PHI
2101 ///     B  ->  A    cast
2103 /// All the related PHI nodes can be replaced by new PHI nodes with type A.
2104 /// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
2105 Instruction *InstCombiner::optimizeBitCastFromPhi(CastInst &CI, PHINode *PN) {
2106 // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
2107 if (hasStoreUsersOnly(CI))
2108 return nullptr;
2110 Value *Src = CI.getOperand(0);
2111 Type *SrcTy = Src->getType(); // Type B
2112 Type *DestTy = CI.getType(); // Type A
2114 SmallVector<PHINode *, 4> PhiWorklist;
2115 SmallSetVector<PHINode *, 4> OldPhiNodes;
2117 // Find all of the A->B casts and PHI nodes.
2118 // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
2119 // OldPhiNodes is used to track all known PHI nodes, before adding a new
2120 // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
2121 PhiWorklist.push_back(PN);
2122 OldPhiNodes.insert(PN);
2123 while (!PhiWorklist.empty()) {
2124 auto *OldPN = PhiWorklist.pop_back_val();
2125 for (Value *IncValue : OldPN->incoming_values()) {
2126 if (isa<Constant>(IncValue))
2127 continue;
2129 if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
2130 // If there is a sequence of one or more load instructions where each
2131 // loaded value is used as the address of a later load, a bitcast is
2132 // necessary to change the value type; don't optimize that case. For
2133 // simplicity we give up if the load address comes from another load.
2134 Value *Addr = LI->getOperand(0);
2135 if (Addr == &CI || isa<LoadInst>(Addr))
2136 return nullptr;
2137 if (LI->hasOneUse() && LI->isSimple())
2138 continue;
2139 // If a LoadInst has more than one use, changing the type of loaded
2140 // value may create another bitcast.
2141 return nullptr;
2142 }
2144 if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
2145 if (OldPhiNodes.insert(PNode))
2146 PhiWorklist.push_back(PNode);
2147 continue;
2148 }
2150 auto *BCI = dyn_cast<BitCastInst>(IncValue);
2151 // We can't handle other instructions.
2152 if (!BCI)
2153 return nullptr;
2155 // Verify it's a A->B cast.
2156 Type *TyA = BCI->getOperand(0)->getType();
2157 Type *TyB = BCI->getType();
2158 if (TyA != DestTy || TyB != SrcTy)
2159 return nullptr;
2160 }
2161 }
2163 // For each old PHI node, create a corresponding new PHI node with a type A.
2164 SmallDenseMap<PHINode *, PHINode *> NewPNodes;
2165 for (auto *OldPN : OldPhiNodes) {
2166 Builder.SetInsertPoint(OldPN);
2167 PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
2168 NewPNodes[OldPN] = NewPN;
2169 }
2171 // Fill in the operands of new PHI nodes.
2172 for (auto *OldPN : OldPhiNodes) {
2173 PHINode *NewPN = NewPNodes[OldPN];
2174 for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
2175 Value *V = OldPN->getOperand(j);
2176 Value *NewV = nullptr;
2177 if (auto *C = dyn_cast<Constant>(V)) {
2178 NewV = ConstantExpr::getBitCast(C, DestTy);
2179 } else if (auto *LI = dyn_cast<LoadInst>(V)) {
2180 Builder.SetInsertPoint(LI->getNextNode());
2181 NewV = Builder.CreateBitCast(LI, DestTy);
2182 Worklist.Add(LI);
2183 } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2184 NewV = BCI->getOperand(0);
2185 } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
2186 NewV = NewPNodes[PrevPN];
2187 }
2188 assert(NewV);
2189 NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
2190 }
2191 }
2193 // If there is a store with type B, change it to type A.
2194 for (User *U : PN->users()) {
2195 auto *SI = dyn_cast<StoreInst>(U);
2196 if (SI && SI->isSimple() && SI->getOperand(0) == PN) {
2197 Builder.SetInsertPoint(SI);
2198 auto *NewBC =
2199 cast<BitCastInst>(Builder.CreateBitCast(NewPNodes[PN], SrcTy));
2200 SI->setOperand(0, NewBC);
2201 Worklist.Add(SI);
2202 assert(hasStoreUsersOnly(*NewBC));
2203 }
2204 }
2206 return replaceInstUsesWith(CI, NewPNodes[PN]);
2207 }
2209 Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
2210 // If the operands are integer typed then apply the integer transforms,
2211 // otherwise just apply the common ones.
2212 Value *Src = CI.getOperand(0);
2213 Type *SrcTy = Src->getType();
2214 Type *DestTy = CI.getType();
2216 // Get rid of casts from one type to the same type. These are useless and can
2217 // be replaced by the operand.
2218 if (DestTy == Src->getType())
2219 return replaceInstUsesWith(CI, Src);
2221 if (PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
2222 PointerType *SrcPTy = cast<PointerType>(SrcTy);
2223 Type *DstElTy = DstPTy->getElementType();
2224 Type *SrcElTy = SrcPTy->getElementType();
2226 // If we are casting an alloca to a pointer to a type of the same
2227 // size, rewrite the allocation instruction to allocate the "right" type.
2228 // There is no need to modify malloc calls because it is their bitcast that
2229 // needs to be cleaned up.
2230 if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
2231 if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
2232 return V;
2234 // When the type pointed to is not sized the cast cannot be
2235 // turned into a gep.
2236 Type *PointeeType =
2237 cast<PointerType>(Src->getType()->getScalarType())->getElementType();
2238 if (!PointeeType->isSized())
2239 return nullptr;
2241 // If the source and destination are pointers, and this cast is equivalent
2242 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
2243 // This can enhance SROA and other transforms that want type-safe pointers.
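// For example (illustrative): with %struct.S = type { [4 x i32] },
//   bitcast %struct.S* %p to i32*
// can become
//   getelementptr inbounds %struct.S, %struct.S* %p, i32 0, i32 0, i32 0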
2244 unsigned NumZeros = 0;
2245 while (SrcElTy != DstElTy &&
2246 isa<CompositeType>(SrcElTy) && !SrcElTy->isPointerTy() &&
2247 SrcElTy->getNumContainedTypes() /* not "{}" */) {
2248 SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(0U);
2249 ++NumZeros;
2250 }
2252 // If we found a path from the src to dest, create the getelementptr now.
2253 if (SrcElTy == DstElTy) {
2254 SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
2255 return GetElementPtrInst::CreateInBounds(Src, Idxs);
2256 }
2257 }
2259 if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
2260 if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
2261 Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
2262 return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
2263 Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
2264 // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
2265 }
2267 if (isa<IntegerType>(SrcTy)) {
2268 // If this is a cast from an integer to vector, check to see if the input
2269 // is a trunc or zext of a bitcast from vector. If so, we can replace all
2270 // the casts with a shuffle and (potentially) a bitcast.
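// For example (illustrative):
//   bitcast (trunc (bitcast <4 x i32> %v to i128) to i64) to <2 x i32>
// can become a shuffle that keeps the two low elements of %v.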
2271 if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
2272 CastInst *SrcCast = cast<CastInst>(Src);
2273 if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
2274 if (isa<VectorType>(BCIn->getOperand(0)->getType()))
2275 if (Instruction *I = optimizeVectorResize(BCIn->getOperand(0),
2276 cast<VectorType>(DestTy), *this))
2277 return I;
2280 // If the input is an 'or' instruction, we may be doing shifts and ors to
2281 // assemble the elements of the vector manually. Try to rip the code out
2282 // and replace it with insertelements.
2283 if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
2284 return replaceInstUsesWith(CI, V);
2285 }
2286 }
2288 if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
2289 if (SrcVTy->getNumElements() == 1) {
2290 // If our destination is not a vector, then make this a straight
2291 // scalar-scalar cast.
2292 if (!DestTy->isVectorTy()) {
2293 Value *Elem =
2294 Builder.CreateExtractElement(Src,
2295 Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
2296 return CastInst::Create(Instruction::BitCast, Elem, DestTy);
2297 }
2299 // Otherwise, see if our source is an insert. If so, then use the scalar
2300 // component directly.
2301 if (InsertElementInst *IEI =
2302 dyn_cast<InsertElementInst>(CI.getOperand(0)))
2303 return CastInst::Create(Instruction::BitCast, IEI->getOperand(1),
2304 DestTy);
2305 }
2306 }
2308 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
2309 // Okay, we have (bitcast (shuffle ..)). Check to see if this is
2310 // a bitcast to a vector with the same # elts.
2311 if (SVI->hasOneUse() && DestTy->isVectorTy() &&
2312 DestTy->getVectorNumElements() == SVI->getType()->getNumElements() &&
2313 SVI->getType()->getNumElements() ==
2314 SVI->getOperand(0)->getType()->getVectorNumElements()) {
2315 BitCastInst *Tmp;
2316 // If either of the operands is a cast from CI.getType(), then
2317 // evaluating the shuffle in the casted destination's type will allow
2318 // us to eliminate at least one cast.
2319 if (((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(0))) &&
2320 Tmp->getOperand(0)->getType() == DestTy) ||
2321 ((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(1))) &&
2322 Tmp->getOperand(0)->getType() == DestTy)) {
2323 Value *LHS = Builder.CreateBitCast(SVI->getOperand(0), DestTy);
2324 Value *RHS = Builder.CreateBitCast(SVI->getOperand(1), DestTy);
2325 // Return a new shuffle vector. Use the same element ID's, as we
2326 // know the vector types match #elts.
2327 return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
2328 }
2329 }
2330 }
2332 // Handle the A->B->A cast, and there is an intervening PHI node.
2333 if (PHINode *PN = dyn_cast<PHINode>(Src))
2334 if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
2335 return I;
2337 if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
2338 return I;
2340 if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
2341 return I;
2343 if (Instruction *I = foldBitCastSelect(CI, Builder))
2344 return I;
2346 if (SrcTy->isPointerTy())
2347 return commonPointerCastTransforms(CI);
2348 return commonCastTransforms(CI);
2349 }
2351 Instruction *InstCombiner::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
2352 // If the destination pointer element type is not the same as the source's,
2353 // first do a bitcast to the destination type, and then the addrspacecast.
2354 // This allows the cast to be exposed to other transforms.
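// For example (illustrative):
//   addrspacecast i32 addrspace(1)* %p to i8*
// becomes
//   %b = bitcast i32 addrspace(1)* %p to i8 addrspace(1)*
//   addrspacecast i8 addrspace(1)* %b to i8*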
2355 Value *Src = CI.getOperand(0);
2356 PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
2357 PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());
2359 Type *DestElemTy = DestTy->getElementType();
2360 if (SrcTy->getElementType() != DestElemTy) {
2361 Type *MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
2362 if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
2363 // Handle vectors of pointers.
2364 MidTy = VectorType::get(MidTy, VT->getNumElements());
2365 }
2367 Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
2368 return new AddrSpaceCastInst(NewBitCast, CI.getType());
2369 }
2371 return commonPointerCastTransforms(CI);
2372 }