1 //===- InstCombineSimplifyDemanded.cpp ------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains logic for simplifying instructions based on information
11 // about how they are used.
13 //===----------------------------------------------------------------------===//
15 #include "InstCombineInternal.h"
16 #include "llvm/Analysis/ValueTracking.h"
17 #include "llvm/IR/IntrinsicInst.h"
18 #include "llvm/IR/PatternMatch.h"
19 #include "llvm/Support/KnownBits.h"
22 using namespace llvm::PatternMatch;
24 #define DEBUG_TYPE "instcombine"
28 struct AMDGPUImageDMaskIntrinsic {
32 #define GET_AMDGPUImageDMaskIntrinsicTable_IMPL
33 #include "InstCombineTables.inc"
35 } // end anonymous namespace
37 /// Check to see if the specified operand of the specified instruction is a
38 /// constant integer. If so, check to see if there are any bits set in the
39 /// constant that are not demanded. If so, shrink the constant and return true.
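/// For illustration (hypothetical values): with operand C = 0xFF and
/// Demanded = 0x0F, C has bits set outside Demanded, so the operand is
/// replaced by C & Demanded = 0x0F.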
40 static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
41 const APInt &Demanded) {
42 assert(I && "No instruction?");
43 assert(OpNo < I->getNumOperands() && "Operand index too large");
45 // The operand must be a constant integer or splat integer.
46 Value *Op = I->getOperand(OpNo);
48 if (!match(Op, m_APInt(C)))
51 // If there are no bits set that aren't demanded, nothing to do.
52 if (C->isSubsetOf(Demanded))
55 // This instruction is producing bits that are not demanded. Shrink the RHS.
56 I->setOperand(OpNo, ConstantInt::get(Op->getType(), *C & Demanded));
63 /// Inst is an integer instruction that SimplifyDemandedBits knows about. See if
64 /// the instruction has any properties that allow us to simplify its operands.
65 bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
66 unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
67 KnownBits Known(BitWidth);
68 APInt DemandedMask(APInt::getAllOnesValue(BitWidth));
70 Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, Known,
73 if (V == &Inst) return true;
74 replaceInstUsesWith(Inst, V);
78 /// This form of SimplifyDemandedBits simplifies the specified instruction
79 /// operand if possible, updating it in place. It returns true if it made any
80 /// change and false otherwise.
81 bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
82 const APInt &DemandedMask,
85 Use &U = I->getOperandUse(OpNo);
86 Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, Known,
88 if (!NewVal) return false;
94 /// This function attempts to replace V with a simpler value based on the
95 /// demanded bits. When this function is called, it is known that only the bits
96 /// set in DemandedMask of the result of V are ever used downstream.
97 /// Consequently, depending on the mask and V, it may be possible to replace V
98 /// with a constant or one of its operands. In such cases, this function does
99 /// the replacement and returns the simpler value. In all other cases, it
100 /// analyzes the expression and sets Known.One to the bits that are known to be one in the
101 /// expression. Known.Zero contains all the bits that are known to be zero in
102 /// the expression. These are provided to potentially allow the caller (which
103 /// might recursively be SimplifyDemandedBits itself) to simplify the expression.
105 /// Known.One and Known.Zero always follow the invariant that:
106 /// Known.One & Known.Zero == 0.
107 /// That is, a bit can't be both 1 and 0. Note that the bits in Known.One and
108 /// Known.Zero may only be accurate for those bits set in DemandedMask. Note
109 /// also that the bitwidth of V, DemandedMask, Known.Zero and Known.One must all be the same.
112 /// This returns null if it did not change anything and it permits no
113 /// simplification. This returns V itself if it did some simplification of V's
114 /// operands based on the information about what bits are demanded. This returns
115 /// some other non-null value if it found out that V is equal to another value
116 /// in the context where the specified bits are demanded, but not for all users.
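/// For example (illustrative): if every bit demanded from V is already known
/// (DemandedMask is a subset of Known.Zero | Known.One), a constant built
/// from Known.One is returned for this use.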
117 Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
118 KnownBits &Known, unsigned Depth,
120 assert(V != nullptr && "Null pointer of Value???");
121 assert(Depth <= 6 && "Limit Search Depth");
122 uint32_t BitWidth = DemandedMask.getBitWidth();
123 Type *VTy = V->getType();
125 (!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
126 Known.getBitWidth() == BitWidth &&
127 "Value *V, DemandedMask and Known must have same BitWidth");
129 if (isa<Constant>(V)) {
130 computeKnownBits(V, Known, Depth, CxtI);
135 if (DemandedMask.isNullValue()) // Not demanding any bits from V.
136 return UndefValue::get(VTy);
138 if (Depth == 6) // Limit search depth.
141 Instruction *I = dyn_cast<Instruction>(V);
143 computeKnownBits(V, Known, Depth, CxtI);
144 return nullptr; // Only analyze instructions.
147 // If there are multiple uses of this value and we aren't at the root, then
148 // we can't do any simplifications of the operands, because DemandedMask
149 // only reflects the bits demanded by *one* of the users.
150 if (Depth != 0 && !I->hasOneUse())
151 return SimplifyMultipleUseDemandedBits(I, DemandedMask, Known, Depth, CxtI);
153 KnownBits LHSKnown(BitWidth), RHSKnown(BitWidth);
155 // If this is the root being simplified, allow it to have multiple uses,
156 // just set the DemandedMask to all bits so that we can try to simplify the
157 // operands. This allows visitTruncInst (for example) to simplify the
158 // operand of a trunc without duplicating all the logic below.
159 if (Depth == 0 && !V->hasOneUse())
160 DemandedMask.setAllBits();
162 switch (I->getOpcode()) {
164 computeKnownBits(I, Known, Depth, CxtI);
166 case Instruction::And: {
167 // If either the LHS or the RHS are Zero, the result is zero.
168 if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
169 SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.Zero, LHSKnown,
172 assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
173 assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
175 // Output known-0 are known to be clear if zero in either the LHS | RHS.
176 APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
177 // Output known-1 bits are only known if set in both the LHS & RHS.
178 APInt IKnownOne = RHSKnown.One & LHSKnown.One;
180 // If the client is only demanding bits that we know, return the known constant.
182 if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
183 return Constant::getIntegerValue(VTy, IKnownOne);
185 // If all of the demanded bits are known 1 on one side, return the other.
186 // These bits cannot contribute to the result of the 'and'.
187 if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
188 return I->getOperand(0);
189 if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
190 return I->getOperand(1);
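// e.g. and i8 %x, 0xF0 with DemandedMask = 0xF0: every demanded bit is known
// one in the constant mask, so the 'and' simplifies to %x in this context.
// (Illustrative values.)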
192 // If the RHS is a constant, see if we can simplify it.
193 if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnown.Zero))
196 Known.Zero = std::move(IKnownZero);
197 Known.One = std::move(IKnownOne);
200 case Instruction::Or: {
201 // If either the LHS or the RHS are One, the result is One.
202 if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
203 SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.One, LHSKnown,
206 assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
207 assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
209 // Output known-0 bits are only known if clear in both the LHS & RHS.
210 APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
211 // Output known-1 bits are known to be set if set in either the LHS | RHS.
212 APInt IKnownOne = RHSKnown.One | LHSKnown.One;
214 // If the client is only demanding bits that we know, return the known constant.
216 if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
217 return Constant::getIntegerValue(VTy, IKnownOne);
219 // If all of the demanded bits are known zero on one side, return the other.
220 // These bits cannot contribute to the result of the 'or'.
221 if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
222 return I->getOperand(0);
223 if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
224 return I->getOperand(1);
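// e.g. or i8 %x, 1 with DemandedMask = 0xFE: every demanded bit is known
// zero in the constant operand, so the 'or' simplifies to %x in this
// context. (Illustrative values.)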
226 // If the RHS is a constant, see if we can simplify it.
227 if (ShrinkDemandedConstant(I, 1, DemandedMask))
230 Known.Zero = std::move(IKnownZero);
231 Known.One = std::move(IKnownOne);
234 case Instruction::Xor: {
235 if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
236 SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Depth + 1))
238 assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
239 assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
241 // Output known-0 bits are known if clear or set in both the LHS & RHS.
242 APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
243 (RHSKnown.One & LHSKnown.One);
244 // Output known-1 are known to be set if set in only one of the LHS, RHS.
245 APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
246 (RHSKnown.One & LHSKnown.Zero);
248 // If the client is only demanding bits that we know, return the known constant.
250 if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
251 return Constant::getIntegerValue(VTy, IKnownOne);
253 // If all of the demanded bits are known zero on one side, return the other.
254 // These bits cannot contribute to the result of the 'xor'.
255 if (DemandedMask.isSubsetOf(RHSKnown.Zero))
256 return I->getOperand(0);
257 if (DemandedMask.isSubsetOf(LHSKnown.Zero))
258 return I->getOperand(1);
260 // If all of the demanded bits are known to be zero on one side or the
261 // other, turn this into an *inclusive* or.
262 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
263 if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero)) {
265 BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
267 return InsertNewInstWith(Or, *I);
270 // If all of the demanded bits on one side are known, and all of the set
271 // bits on that side are also known to be set on the other side, turn this
272 // into an AND, as we know the bits will be cleared.
273 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
274 if (DemandedMask.isSubsetOf(RHSKnown.Zero|RHSKnown.One) &&
275 RHSKnown.One.isSubsetOf(LHSKnown.One)) {
276 Constant *AndC = Constant::getIntegerValue(VTy,
277 ~RHSKnown.One & DemandedMask);
278 Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
279 return InsertNewInstWith(And, *I);
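// e.g. (X | 0xC) ^ 0xC --> (X | 0xC) & ~0xC: the 'or' forces bits 2 and 3
// to one, so the xor merely clears them again. (Illustrative constants.)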
282 // If the RHS is a constant, see if we can simplify it.
283 // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
284 if (ShrinkDemandedConstant(I, 1, DemandedMask))
287 // If our LHS is an 'and' and if it has one use, and if any of the bits we
288 // are flipping are known to be set, then the xor is just resetting those
289 // bits to zero. We can just knock out bits from the 'and' and the 'xor',
290 // simplifying both of them.
291 if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0)))
292 if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
293 isa<ConstantInt>(I->getOperand(1)) &&
294 isa<ConstantInt>(LHSInst->getOperand(1)) &&
295 (LHSKnown.One & RHSKnown.One & DemandedMask) != 0) {
296 ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
297 ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
298 APInt NewMask = ~(LHSKnown.One & RHSKnown.One & DemandedMask);
301 ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
302 Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
303 InsertNewInstWith(NewAnd, *I);
306 ConstantInt::get(I->getType(), NewMask & XorRHS->getValue());
307 Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
308 return InsertNewInstWith(NewXor, *I);
311 // Output known-0 bits are known if clear or set in both the LHS & RHS.
312 Known.Zero = std::move(IKnownZero);
313 // Output known-1 are known to be set if set in only one of the LHS, RHS.
314 Known.One = std::move(IKnownOne);
317 case Instruction::Select: {
319 SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
320 if (SPF == SPF_UMAX) {
321 // UMax(A, C) == A if ...
322 // The lowest non-zero bit of DemandedMask is higher than the highest
323 // non-zero bit of C.
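// e.g. umax(%a, 7) when DemandedMask = 0xF8: the demanded bits start at bit
// 3, above the highest set bit of 7, so umax(%a, 7) and %a agree on every
// demanded bit and %a is used directly. (Illustrative values.)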
325 unsigned CTZ = DemandedMask.countTrailingZeros();
326 if (match(RHS, m_APInt(C)) && CTZ >= C->getActiveBits())
328 } else if (SPF == SPF_UMIN) {
329 // UMin(A, C) == A if ...
330 // The lowest non-zero bit of DemandedMask is higher than the highest non-one bit of C.
332 // This comes from using De Morgan's laws on the above umax example.
334 unsigned CTZ = DemandedMask.countTrailingZeros();
335 if (match(RHS, m_APInt(C)) &&
336 CTZ >= C->getBitWidth() - C->countLeadingOnes())
340 // If this is a select as part of any other min/max pattern, don't simplify
341 // any further in case we break the structure.
342 if (SPF != SPF_UNKNOWN)
345 if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Depth + 1) ||
346 SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Depth + 1))
348 assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
349 assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
351 // If the operands are constants, see if we can simplify them.
352 if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
353 ShrinkDemandedConstant(I, 2, DemandedMask))
356 // Only known if known in both the LHS and RHS.
357 Known.One = RHSKnown.One & LHSKnown.One;
358 Known.Zero = RHSKnown.Zero & LHSKnown.Zero;
361 case Instruction::ZExt:
362 case Instruction::Trunc: {
363 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
365 APInt InputDemandedMask = DemandedMask.zextOrTrunc(SrcBitWidth);
366 KnownBits InputKnown(SrcBitWidth);
367 if (SimplifyDemandedBits(I, 0, InputDemandedMask, InputKnown, Depth + 1))
369 Known = InputKnown.zextOrTrunc(BitWidth);
370 // Any top bits are known to be zero.
371 if (BitWidth > SrcBitWidth)
372 Known.Zero.setBitsFrom(SrcBitWidth);
373 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
376 case Instruction::BitCast:
377 if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
378 return nullptr; // vector->int or fp->int?
380 if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
381 if (VectorType *SrcVTy =
382 dyn_cast<VectorType>(I->getOperand(0)->getType())) {
383 if (DstVTy->getNumElements() != SrcVTy->getNumElements())
384 // Don't touch a bitcast between vectors of different element counts.
387 // Don't touch a scalar-to-vector bitcast.
389 } else if (I->getOperand(0)->getType()->isVectorTy())
390 // Don't touch a vector-to-scalar bitcast.
393 if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
395 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
397 case Instruction::SExt: {
398 // Compute the bits in the result that are not present in the input.
399 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
401 APInt InputDemandedBits = DemandedMask.trunc(SrcBitWidth);
403 // If any of the sign extended bits are demanded, we know that the sign bit is demanded.
405 if (DemandedMask.getActiveBits() > SrcBitWidth)
406 InputDemandedBits.setBit(SrcBitWidth-1);
408 KnownBits InputKnown(SrcBitWidth);
409 if (SimplifyDemandedBits(I, 0, InputDemandedBits, InputKnown, Depth + 1))
412 // If the input sign bit is known zero, or if the NewBits are not demanded,
413 // convert this into a zero extension.
414 if (InputKnown.isNonNegative() ||
415 DemandedMask.getActiveBits() <= SrcBitWidth) {
416 // Convert to ZExt cast.
417 CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
418 return InsertNewInstWith(NewCast, *I);
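// e.g. sext i8 %x to i32 where only bits 0..7 of the result are demanded:
// none of the extension bits matter, so the sext is rewritten as a zext.
// (Illustrative types.)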
421 // If the sign bit of the input is known set or clear, then we know the
422 // top bits of the result.
423 Known = InputKnown.sext(BitWidth);
424 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
427 case Instruction::Add:
428 case Instruction::Sub: {
429 /// If the high-bits of an ADD/SUB are not demanded, then we do not care
430 /// about the high bits of the operands.
431 unsigned NLZ = DemandedMask.countLeadingZeros();
432 // Right fill the mask of bits for this ADD/SUB to demand the most
433 // significant bit and all those below it.
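// e.g. for an i8 add with DemandedMask = 0x0F: NLZ = 4, so DemandedFromOps
// becomes 0x0F; carries out of bit 3 can never be observed, so the operands
// only need their low four bits. (Illustrative width and mask.)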
434 APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
435 if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
436 SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Depth + 1) ||
437 ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
438 SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Depth + 1)) {
440 // Disable the nsw and nuw flags here: We can no longer guarantee that
441 // we won't wrap after simplification. Removing the nsw/nuw flags is
442 // legal here because the top bit is not demanded.
443 BinaryOperator &BinOP = *cast<BinaryOperator>(I);
444 BinOP.setHasNoSignedWrap(false);
445 BinOP.setHasNoUnsignedWrap(false);
450 // If we are known to be adding/subtracting zeros to every bit below
451 // the highest demanded bit, we just return the other side.
452 if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
453 return I->getOperand(0);
454 // We can't do this with the LHS for subtraction, unless we are only
455 // demanding the LSB.
456 if ((I->getOpcode() == Instruction::Add ||
457 DemandedFromOps.isOneValue()) &&
458 DemandedFromOps.isSubsetOf(LHSKnown.Zero))
459 return I->getOperand(1);
461 // Otherwise just compute the known bits of the result.
462 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
463 Known = KnownBits::computeForAddSub(I->getOpcode() == Instruction::Add,
464 NSW, LHSKnown, RHSKnown);
467 case Instruction::Shl: {
469 if (match(I->getOperand(1), m_APInt(SA))) {
471 if (match(I->getOperand(0), m_Shr(m_Value(), m_APInt(ShrAmt))))
472 if (Instruction *Shr = dyn_cast<Instruction>(I->getOperand(0)))
473 if (Value *R = simplifyShrShlDemandedBits(Shr, *ShrAmt, I, *SA,
474 DemandedMask, Known))
477 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
478 APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
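// e.g. shl i8 %x, 3 with DemandedMask = 0xF8: only bits 0..4 of %x are
// demanded, since DemandedMask.lshr(3) = 0x1F. (Illustrative values, no
// nsw/nuw.)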
480 // If the shift is NUW/NSW, then it does demand the high bits.
481 ShlOperator *IOp = cast<ShlOperator>(I);
482 if (IOp->hasNoSignedWrap())
483 DemandedMaskIn.setHighBits(ShiftAmt+1);
484 else if (IOp->hasNoUnsignedWrap())
485 DemandedMaskIn.setHighBits(ShiftAmt);
487 if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
489 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
490 Known.Zero <<= ShiftAmt;
491 Known.One <<= ShiftAmt;
492 // low bits known zero.
494 Known.Zero.setLowBits(ShiftAmt);
498 case Instruction::LShr: {
500 if (match(I->getOperand(1), m_APInt(SA))) {
501 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
503 // Unsigned shift right.
504 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
506 // If the shift is exact, then it does demand the low bits (and knows that they are zero).
508 if (cast<LShrOperator>(I)->isExact())
509 DemandedMaskIn.setLowBits(ShiftAmt);
511 if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
513 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
514 Known.Zero.lshrInPlace(ShiftAmt);
515 Known.One.lshrInPlace(ShiftAmt);
517 Known.Zero.setHighBits(ShiftAmt); // high bits known zero.
521 case Instruction::AShr: {
522 // If this is an arithmetic shift right and only the low-bit is set, we can
523 // always convert this into a logical shr, even if the shift amount is
524 // variable. The low bit of the shift cannot be an input sign bit unless
525 // the shift amount is >= the size of the datatype, which is undefined.
526 if (DemandedMask.isOneValue()) {
527 // Perform the logical shift right.
528 Instruction *NewVal = BinaryOperator::CreateLShr(
529 I->getOperand(0), I->getOperand(1), I->getName());
530 return InsertNewInstWith(NewVal, *I);
533 // If the sign bit is the only bit demanded by this ashr, then there is no
534 // need to do it, the shift doesn't change the high bit.
535 if (DemandedMask.isSignMask())
536 return I->getOperand(0);
539 if (match(I->getOperand(1), m_APInt(SA))) {
540 uint32_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
542 // Signed shift right.
543 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
544 // If any of the high bits are demanded, we should set the sign bit as demanded.
546 if (DemandedMask.countLeadingZeros() <= ShiftAmt)
547 DemandedMaskIn.setSignBit();
549 // If the shift is exact, then it does demand the low bits (and knows that they are zero).
551 if (cast<AShrOperator>(I)->isExact())
552 DemandedMaskIn.setLowBits(ShiftAmt);
554 if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
557 unsigned SignBits = ComputeNumSignBits(I->getOperand(0), Depth + 1, CxtI);
559 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
560 // Compute the new bits that are at the top now plus sign bits.
561 APInt HighBits(APInt::getHighBitsSet(
562 BitWidth, std::min(SignBits + ShiftAmt - 1, BitWidth)));
563 Known.Zero.lshrInPlace(ShiftAmt);
564 Known.One.lshrInPlace(ShiftAmt);
566 // If the input sign bit is known to be zero, or if none of the top bits
567 // are demanded, turn this into an unsigned shift right.
568 assert(BitWidth > ShiftAmt && "Shift amount not saturated?");
569 if (Known.Zero[BitWidth-ShiftAmt-1] ||
570 !DemandedMask.intersects(HighBits)) {
571 BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0),
573 LShr->setIsExact(cast<BinaryOperator>(I)->isExact());
574 return InsertNewInstWith(LShr, *I);
575 } else if (Known.One[BitWidth-ShiftAmt-1]) { // New bits are known one.
576 Known.One |= HighBits;
581 case Instruction::UDiv: {
582 // UDiv doesn't demand low bits that are zero in the divisor.
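// e.g. udiv i8 %x, 8: the divisor has three trailing zeros, so the low
// three bits of %x cannot affect the quotient and are not demanded from it.
// (Illustrative constant.)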
584 if (match(I->getOperand(1), m_APInt(SA))) {
585 // If the division is exact, then it does demand the low bits.
586 if (cast<UDivOperator>(I)->isExact())
589 // FIXME: Take the demanded mask of the result into account.
590 unsigned RHSTrailingZeros = SA->countTrailingZeros();
591 APInt DemandedMaskIn =
592 APInt::getHighBitsSet(BitWidth, BitWidth - RHSTrailingZeros);
593 if (SimplifyDemandedBits(I, 0, DemandedMaskIn, LHSKnown, Depth + 1))
596 // Propagate zero bits from the input.
597 Known.Zero.setHighBits(std::min(
598 BitWidth, LHSKnown.Zero.countLeadingOnes() + RHSTrailingZeros));
602 case Instruction::SRem:
603 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
604 // X % -1 demands all the bits because we don't want to introduce
605 // INT_MIN % -1 (== undef) by accident.
606 if (Rem->isMinusOne())
608 APInt RA = Rem->getValue().abs();
609 if (RA.isPowerOf2()) {
610 if (DemandedMask.ult(RA)) // srem won't affect demanded bits
611 return I->getOperand(0);
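// e.g. srem i8 %x, 4 with DemandedMask = 0x3: a remainder by a power of two
// leaves the low two bits of %x unchanged, so %x is used directly.
// (Illustrative values.)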
613 APInt LowBits = RA - 1;
614 APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
615 if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Depth + 1))
618 // The low bits of LHS are unchanged by the srem.
619 Known.Zero = LHSKnown.Zero & LowBits;
620 Known.One = LHSKnown.One & LowBits;
622 // If LHS is non-negative or has all low bits zero, then the upper bits are all zero.
624 if (LHSKnown.isNonNegative() || LowBits.isSubsetOf(LHSKnown.Zero))
625 Known.Zero |= ~LowBits;
627 // If LHS is negative and not all low bits are zero, then the upper bits are all one.
629 if (LHSKnown.isNegative() && LowBits.intersects(LHSKnown.One))
630 Known.One |= ~LowBits;
632 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
637 // The sign bit is the LHS's sign bit, except when the result of the
638 // remainder is zero.
639 if (DemandedMask.isSignBitSet()) {
640 computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
641 // If it's known zero, our sign bit is also zero.
642 if (LHSKnown.isNonNegative())
643 Known.makeNonNegative();
646 case Instruction::URem: {
647 KnownBits Known2(BitWidth);
648 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
649 if (SimplifyDemandedBits(I, 0, AllOnes, Known2, Depth + 1) ||
650 SimplifyDemandedBits(I, 1, AllOnes, Known2, Depth + 1))
653 unsigned Leaders = Known2.countMinLeadingZeros();
654 Known.Zero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
657 case Instruction::Call:
658 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
659 switch (II->getIntrinsicID()) {
661 case Intrinsic::bswap: {
662 // If the only bits demanded come from one byte of the bswap result,
663 // just shift the input byte into position to eliminate the bswap.
664 unsigned NLZ = DemandedMask.countLeadingZeros();
665 unsigned NTZ = DemandedMask.countTrailingZeros();
667 // Round NTZ down to the next byte. If we have 11 trailing zeros, then
668 // we need all the bits down to bit 8. Likewise, round NLZ. If we
669 // have 14 leading zeros, round to 8.
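// e.g. i32 bswap with DemandedMask = 0x0000ff00: NLZ = 16, NTZ = 8, exactly
// one byte is demanded, and it comes from input bits 16..23, so the bswap is
// replaced by a lshr of 8. (Illustrative mask.)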
672 // If we need exactly one byte, we can do this transformation.
673 if (BitWidth-NLZ-NTZ == 8) {
674 unsigned ResultBit = NTZ;
675 unsigned InputBit = BitWidth-NTZ-8;
677 // Replace this with either a left or right shift to get the byte into the right place.
680 if (InputBit > ResultBit)
681 NewVal = BinaryOperator::CreateLShr(II->getArgOperand(0),
682 ConstantInt::get(I->getType(), InputBit-ResultBit));
684 NewVal = BinaryOperator::CreateShl(II->getArgOperand(0),
685 ConstantInt::get(I->getType(), ResultBit-InputBit));
687 return InsertNewInstWith(NewVal, *I);
690 // TODO: Could compute known zero/one bits based on the input.
693 case Intrinsic::fshr:
694 case Intrinsic::fshl: {
696 if (!match(I->getOperand(2), m_APInt(SA)))
699 // Normalize to funnel shift left. APInt shifts of BitWidth are well-
700 // defined, so no need to special-case zero shifts here.
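// e.g. i8 fshr(%a, %b, 3) is treated as fshl by 5; with DemandedMask = 0xE0
// the three demanded top bits come only from bits 0..2 of %a, and nothing is
// demanded from %b. (Illustrative values.)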
701 uint64_t ShiftAmt = SA->urem(BitWidth);
702 if (II->getIntrinsicID() == Intrinsic::fshr)
703 ShiftAmt = BitWidth - ShiftAmt;
705 APInt DemandedMaskLHS(DemandedMask.lshr(ShiftAmt));
706 APInt DemandedMaskRHS(DemandedMask.shl(BitWidth - ShiftAmt));
707 if (SimplifyDemandedBits(I, 0, DemandedMaskLHS, LHSKnown, Depth + 1) ||
708 SimplifyDemandedBits(I, 1, DemandedMaskRHS, RHSKnown, Depth + 1))
711 Known.Zero = LHSKnown.Zero.shl(ShiftAmt) |
712 RHSKnown.Zero.lshr(BitWidth - ShiftAmt);
713 Known.One = LHSKnown.One.shl(ShiftAmt) |
714 RHSKnown.One.lshr(BitWidth - ShiftAmt);
717 case Intrinsic::x86_mmx_pmovmskb:
718 case Intrinsic::x86_sse_movmsk_ps:
719 case Intrinsic::x86_sse2_movmsk_pd:
720 case Intrinsic::x86_sse2_pmovmskb_128:
721 case Intrinsic::x86_avx_movmsk_ps_256:
722 case Intrinsic::x86_avx_movmsk_pd_256:
723 case Intrinsic::x86_avx2_pmovmskb: {
724 // MOVMSK copies the vector elements' sign bits to the low bits
725 // and zeros the high bits.
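// e.g. for a <4 x float> movmsk only result bits 0..3 can be non-zero; if
// none of those bits are demanded the call folds to zero, and bits 4 and up
// are always known zero. (Illustrative element count.)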
727 if (II->getIntrinsicID() == Intrinsic::x86_mmx_pmovmskb) {
728 ArgWidth = 8; // Arg is x86_mmx, but treated as <8 x i8>.
730 auto Arg = II->getArgOperand(0);
731 auto ArgType = cast<VectorType>(Arg->getType());
732 ArgWidth = ArgType->getNumElements();
735 // If we don't need any of the low bits then return zero;
736 // we know that DemandedMask is non-zero already.
737 APInt DemandedElts = DemandedMask.zextOrTrunc(ArgWidth);
738 if (DemandedElts.isNullValue())
739 return ConstantInt::getNullValue(VTy);
741 // We know that the upper bits are set to zero.
742 Known.Zero.setBitsFrom(ArgWidth);
745 case Intrinsic::x86_sse42_crc32_64_64:
746 Known.Zero.setBitsFrom(32);
750 computeKnownBits(V, Known, Depth, CxtI);
754 // If the client is only demanding bits that we know, return the known constant.
756 if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
757 return Constant::getIntegerValue(VTy, Known.One);
761 /// Helper routine of SimplifyDemandedUseBits. It computes Known
762 /// bits. It also tries to handle simplifications that can be done based on
763 /// DemandedMask, but without modifying the Instruction.
764 Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
765 const APInt &DemandedMask,
769 unsigned BitWidth = DemandedMask.getBitWidth();
770 Type *ITy = I->getType();
772 KnownBits LHSKnown(BitWidth);
773 KnownBits RHSKnown(BitWidth);
775 // Despite the fact that we can't simplify this instruction in every user's
776 // context, we can at least compute the known bits, and we can
777 // do simplifications that apply to *just* the one user if we know that
778 // this instruction has a simpler value in that context.
779 switch (I->getOpcode()) {
780 case Instruction::And: {
781 // If either the LHS or the RHS are Zero, the result is zero.
782 computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
783 computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
786 // Output known-0 are known to be clear if zero in either the LHS | RHS.
787 APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
788 // Output known-1 bits are only known if set in both the LHS & RHS.
789 APInt IKnownOne = RHSKnown.One & LHSKnown.One;
791 // If the client is only demanding bits that we know, return the known constant.
793 if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
794 return Constant::getIntegerValue(ITy, IKnownOne);
796 // If all of the demanded bits are known 1 on one side, return the other.
797 // These bits cannot contribute to the result of the 'and' in this context.
799 if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
800 return I->getOperand(0);
801 if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
802 return I->getOperand(1);
804 Known.Zero = std::move(IKnownZero);
805 Known.One = std::move(IKnownOne);
808 case Instruction::Or: {
809 // We can simplify (X|Y) -> X or Y in the user's context if we know that
810 // only bits from X or Y are demanded.
812 // If either the LHS or the RHS are One, the result is One.
813 computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
814 computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
817 // Output known-0 bits are only known if clear in both the LHS & RHS.
818 APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
819 // Output known-1 are known to be set if set in either the LHS | RHS.
820 APInt IKnownOne = RHSKnown.One | LHSKnown.One;
822 // If the client is only demanding bits that we know, return the known constant.
824 if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
825 return Constant::getIntegerValue(ITy, IKnownOne);
827 // If all of the demanded bits are known zero on one side, return the
828 // other. These bits cannot contribute to the result of the 'or' in this context.
830 if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
831 return I->getOperand(0);
832 if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
833 return I->getOperand(1);
835 Known.Zero = std::move(IKnownZero);
836 Known.One = std::move(IKnownOne);
839 case Instruction::Xor: {
840 // We can simplify (X^Y) -> X or Y in the user's context if we know that
841 // only bits from X or Y are demanded.
843 computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
844 computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
847 // Output known-0 bits are known if clear or set in both the LHS & RHS.
848 APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
849 (RHSKnown.One & LHSKnown.One);
850 // Output known-1 are known to be set if set in only one of the LHS, RHS.
851 APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
852 (RHSKnown.One & LHSKnown.Zero);
854 // If the client is only demanding bits that we know, return the known constant.
856 if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
857 return Constant::getIntegerValue(ITy, IKnownOne);
859 // If all of the demanded bits are known zero on one side, return the other.
861 if (DemandedMask.isSubsetOf(RHSKnown.Zero))
862 return I->getOperand(0);
863 if (DemandedMask.isSubsetOf(LHSKnown.Zero))
864 return I->getOperand(1);
866 // Output known-0 bits are known if clear or set in both the LHS & RHS.
867 Known.Zero = std::move(IKnownZero);
868 // Output known-1 are known to be set if set in only one of the LHS, RHS.
869 Known.One = std::move(IKnownOne);
873 // Compute the Known bits to simplify things downstream.
874 computeKnownBits(I, Known, Depth, CxtI);
876 // If this user is only demanding bits that we know, return the known constant.
878 if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
879 return Constant::getIntegerValue(ITy, Known.One);
888 /// Helper routine of SimplifyDemandedUseBits. It tries to simplify
889 /// "E1 = (X lsr C1) << C2", where the C1 and C2 are constant, into
890 /// "E2 = X << (C2 - C1)" or "E2 = X >> (C1 - C2)", depending on the sign
893 /// Suppose E1 and E2 are generally different in bits S={bm, bm+1,
894 /// ..., bn}, without considering the specific value X is holding.
895 /// This transformation is legal iff one of the following conditions holds:
896 /// 1) All the bits in S are 0; in this case E1 == E2.
897 /// 2) We don't care about those bits in S, per the input DemandedMask.
898 /// 3) Combination of 1) and 2): some bits in S are 0, and we don't care about the rest.
901 /// Currently we only test condition 2).
903 /// As with SimplifyDemandedUseBits, it returns NULL if the simplification was not possible.
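/// For illustration (hypothetical 8-bit values): E1 = (X lshr 2) << 4 and
/// E2 = X << 2 differ only in bits 2 and 3; if DemandedMask = 0xF0 those bits
/// are not demanded, so E1 can be rewritten as E2.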
906 InstCombiner::simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1,
907 Instruction *Shl, const APInt &ShlOp1,
908 const APInt &DemandedMask,
910 if (!ShlOp1 || !ShrOp1)
911 return nullptr; // No-op.
913 Value *VarX = Shr->getOperand(0);
914 Type *Ty = VarX->getType();
915 unsigned BitWidth = Ty->getScalarSizeInBits();
916 if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
917 return nullptr; // Undef.
919 unsigned ShlAmt = ShlOp1.getZExtValue();
920 unsigned ShrAmt = ShrOp1.getZExtValue();
922 Known.One.clearAllBits();
923 Known.Zero.setLowBits(ShlAmt - 1);
924 Known.Zero &= DemandedMask;
926 APInt BitMask1(APInt::getAllOnesValue(BitWidth));
927 APInt BitMask2(APInt::getAllOnesValue(BitWidth));
929 bool isLshr = (Shr->getOpcode() == Instruction::LShr);
930 BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
931 (BitMask1.ashr(ShrAmt) << ShlAmt);
933 if (ShrAmt <= ShlAmt) {
934 BitMask2 <<= (ShlAmt - ShrAmt);
936 BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt):
937 BitMask2.ashr(ShrAmt - ShlAmt);
940 // Check if condition-2 (see the comment to this function) is satisfied.
941 if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
942 if (ShrAmt == ShlAmt)
945 if (!Shr->hasOneUse())
949 if (ShrAmt < ShlAmt) {
950 Constant *Amt = ConstantInt::get(VarX->getType(), ShlAmt - ShrAmt);
951 New = BinaryOperator::CreateShl(VarX, Amt);
952 BinaryOperator *Orig = cast<BinaryOperator>(Shl);
953 New->setHasNoSignedWrap(Orig->hasNoSignedWrap());
954 New->setHasNoUnsignedWrap(Orig->hasNoUnsignedWrap());
956 Constant *Amt = ConstantInt::get(VarX->getType(), ShrAmt - ShlAmt);
957 New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
958 BinaryOperator::CreateAShr(VarX, Amt);
959 if (cast<BinaryOperator>(Shr)->isExact())
960 New->setIsExact(true);
963 return InsertNewInstWith(New, *Shl);
969 /// Implement SimplifyDemandedVectorElts for amdgcn buffer and image intrinsics.
970 Value *InstCombiner::simplifyAMDGCNMemoryIntrinsicDemanded(IntrinsicInst *II,
974 unsigned VWidth = II->getType()->getVectorNumElements();
978 // Need to change to new instruction format
979 ConstantInt *TFC = nullptr;
980 bool TFELWEEnabled = false;
982 TFC = dyn_cast<ConstantInt>(II->getArgOperand(TFCIdx));
983 TFELWEEnabled = TFC->getZExtValue() & 0x1 // TFE
984 || TFC->getZExtValue() & 0x2; // LWE
988 return nullptr; // TFE not yet supported
990 ConstantInt *NewDMask = nullptr;
993 // Pretend that a prefix of elements is demanded to simplify the code below.
995 DemandedElts = (1 << DemandedElts.getActiveBits()) - 1;
997 ConstantInt *DMask = dyn_cast<ConstantInt>(II->getArgOperand(DMaskIdx));
999 return nullptr; // non-constant dmask is not supported by codegen
1001 unsigned DMaskVal = DMask->getZExtValue() & 0xf;
1003 // Mask off values that are undefined because the dmask doesn't cover them
1004 DemandedElts &= (1 << countPopulation(DMaskVal)) - 1;
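// e.g. dmask = 0b1011 loads three components; if only the first two loaded
// values are demanded, the dmask can be narrowed to 0b0011. (Illustrative
// mask.)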
1006 unsigned NewDMaskVal = 0;
1007 unsigned OrigLoadIdx = 0;
1008 for (unsigned SrcIdx = 0; SrcIdx < 4; ++SrcIdx) {
1009 const unsigned Bit = 1 << SrcIdx;
1010 if (!!(DMaskVal & Bit)) {
1011 if (!!DemandedElts[OrigLoadIdx])
1017 if (DMaskVal != NewDMaskVal)
1018 NewDMask = ConstantInt::get(DMask->getType(), NewDMaskVal);
1021 // TODO: Handle 3 vectors when supported in code gen.
1022 unsigned NewNumElts = PowerOf2Ceil(DemandedElts.countPopulation());
1024 return UndefValue::get(II->getType());
1026 if (NewNumElts >= VWidth && DemandedElts.isMask()) {
1028 II->setArgOperand(DMaskIdx, NewDMask);
1032 // Determine the overload types of the original intrinsic.
1033 auto IID = II->getIntrinsicID();
1034 SmallVector<Intrinsic::IITDescriptor, 16> Table;
1035 getIntrinsicInfoTableEntries(IID, Table);
1036 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
1038 FunctionType *FTy = II->getCalledFunction()->getFunctionType();
1039 SmallVector<Type *, 6> OverloadTys;
1040 Intrinsic::matchIntrinsicType(FTy->getReturnType(), TableRef, OverloadTys);
1041 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
1042 Intrinsic::matchIntrinsicType(FTy->getParamType(i), TableRef, OverloadTys);
1044 // Get the new return type overload of the intrinsic.
1045 Module *M = II->getParent()->getParent()->getParent();
1046 Type *EltTy = II->getType()->getVectorElementType();
1047 Type *NewTy = (NewNumElts == 1) ? EltTy : VectorType::get(EltTy, NewNumElts);
1049 OverloadTys[0] = NewTy;
1050 Function *NewIntrin = Intrinsic::getDeclaration(M, IID, OverloadTys);
1052 SmallVector<Value *, 16> Args;
1053 for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I)
1054 Args.push_back(II->getArgOperand(I));
1057 Args[DMaskIdx] = NewDMask;
1059 IRBuilderBase::InsertPointGuard Guard(Builder);
1060 Builder.SetInsertPoint(II);
1062 CallInst *NewCall = Builder.CreateCall(NewIntrin, Args);
1063 NewCall->takeName(II);
1064 NewCall->copyMetadata(*II);
1066 if (NewNumElts == 1) {
1067 return Builder.CreateInsertElement(UndefValue::get(II->getType()), NewCall,
1068 DemandedElts.countTrailingZeros());
1071 SmallVector<uint32_t, 8> EltMask;
1072 unsigned NewLoadIdx = 0;
1073 for (unsigned OrigLoadIdx = 0; OrigLoadIdx < VWidth; ++OrigLoadIdx) {
1074 if (!!DemandedElts[OrigLoadIdx])
1075 EltMask.push_back(NewLoadIdx++);
1077 EltMask.push_back(NewNumElts);
1081 Builder.CreateShuffleVector(NewCall, UndefValue::get(NewTy), EltMask);
1086 /// The specified value produces a vector with any number of elements.
1087 /// DemandedElts contains the set of elements that are actually used by the
1088 /// caller. This method analyzes which elements of the operand are undef and
1089 /// returns that information in UndefElts.
1091 /// If the information about demanded elements can be used to simplify the
1092 /// operation, the operation is simplified and the resultant value is
1093 /// returned. This returns null if no change was made.
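/// For example (illustrative): if only element 0 of a shufflevector result
/// is demanded, only the source element feeding lane 0 is demanded from the
/// operands, and every other output lane is reported undef in UndefElts.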
1094 Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
1097 unsigned VWidth = V->getType()->getVectorNumElements();
1098 APInt EltMask(APInt::getAllOnesValue(VWidth));
1099 assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
1101 if (isa<UndefValue>(V)) {
1102 // If the entire vector is undefined, just return this info.
1103 UndefElts = EltMask;
1107 if (DemandedElts.isNullValue()) { // If nothing is demanded, provide undef.
1108 UndefElts = EltMask;
1109 return UndefValue::get(V->getType());
1114 if (auto *C = dyn_cast<Constant>(V)) {
1115 // Check if this is identity. If so, return 0 since we are not simplifying anything.
1117 if (DemandedElts.isAllOnesValue())
1120 Type *EltTy = cast<VectorType>(V->getType())->getElementType();
1121 Constant *Undef = UndefValue::get(EltTy);
1122 SmallVector<Constant*, 16> Elts;
1123 for (unsigned i = 0; i != VWidth; ++i) {
1124 if (!DemandedElts[i]) { // If not demanded, set to undef.
1125 Elts.push_back(Undef);
1126 UndefElts.setBit(i);
1130 Constant *Elt = C->getAggregateElement(i);
1131 if (!Elt) return nullptr;
1133 if (isa<UndefValue>(Elt)) { // Already undef.
1134 Elts.push_back(Undef);
1135 UndefElts.setBit(i);
1136 } else { // Otherwise, defined.
1137 Elts.push_back(Elt);
1141 // If we changed the constant, return it.
1142 Constant *NewCV = ConstantVector::get(Elts);
1143 return NewCV != C ? NewCV : nullptr;
1146 // Limit search depth.
1150 // If multiple users are using the root value, proceed with
1151 // simplification conservatively assuming that all elements are needed.
1153 if (!V->hasOneUse()) {
1154 // Quit if we find multiple users of a non-root value though.
1155 // They'll be handled when it's their turn to be visited by
1156 // the main instcombine process.
1158 // TODO: Just compute the UndefElts information recursively.
1161 // Conservatively assume that all elements are needed.
1162 DemandedElts = EltMask;
1165 Instruction *I = dyn_cast<Instruction>(V);
1166 if (!I) return nullptr; // Only analyze instructions.
1168 bool MadeChange = false;
1169 auto simplifyAndSetOp = [&](Instruction *Inst, unsigned OpNum,
1170 APInt Demanded, APInt &Undef) {
1171 auto *II = dyn_cast<IntrinsicInst>(Inst);
1172 Value *Op = II ? II->getArgOperand(OpNum) : Inst->getOperand(OpNum);
1173 if (Value *V = SimplifyDemandedVectorElts(Op, Demanded, Undef, Depth + 1)) {
1175 II->setArgOperand(OpNum, V);
1177 Inst->setOperand(OpNum, V);
1182 APInt UndefElts2(VWidth, 0);
1183 APInt UndefElts3(VWidth, 0);
1184 switch (I->getOpcode()) {
1187 case Instruction::InsertElement: {
1188 // If this is a variable index, we don't know which element it overwrites, so
1189 // demand exactly the same input as we produce.
1190 ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
1192 // Note that we can't propagate undef elt info, because we don't know
1193 // which elt is getting updated.
1194 simplifyAndSetOp(I, 0, DemandedElts, UndefElts2);
1198 // The element inserted overwrites whatever was there, so the input demanded
1199 // set is simpler than the output set.
1200 unsigned IdxNo = Idx->getZExtValue();
1201 APInt PreInsertDemandedElts = DemandedElts;
1203 PreInsertDemandedElts.clearBit(IdxNo);
1205 simplifyAndSetOp(I, 0, PreInsertDemandedElts, UndefElts);
1207 // If this is inserting an element that isn't demanded, remove this insertelement.
1209 if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
1211 return I->getOperand(0);
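// e.g. inserting into lane 2 of a <4 x i32> when only elements 0 and 1 are
// demanded: the inserted value is never read in this context, so operand 0
// (the original vector) is returned. (Illustrative index and width.)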
1214 // The inserted element is defined.
1215 UndefElts.clearBit(IdxNo);
1218 case Instruction::ShuffleVector: {
1219 ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
1220 unsigned LHSVWidth =
1221 Shuffle->getOperand(0)->getType()->getVectorNumElements();
1222 APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
1223 for (unsigned i = 0; i < VWidth; i++) {
1224 if (DemandedElts[i]) {
1225 unsigned MaskVal = Shuffle->getMaskValue(i);
1226 if (MaskVal != -1u) {
1227 assert(MaskVal < LHSVWidth * 2 &&
1228 "shufflevector mask index out of range!");
1229 if (MaskVal < LHSVWidth)
1230 LeftDemanded.setBit(MaskVal);
1232 RightDemanded.setBit(MaskVal - LHSVWidth);
1237 APInt LHSUndefElts(LHSVWidth, 0);
1238 simplifyAndSetOp(I, 0, LeftDemanded, LHSUndefElts);
1240 APInt RHSUndefElts(LHSVWidth, 0);
1241 simplifyAndSetOp(I, 1, RightDemanded, RHSUndefElts);
1243 bool NewUndefElts = false;
1244 unsigned LHSIdx = -1u, LHSValIdx = -1u;
1245 unsigned RHSIdx = -1u, RHSValIdx = -1u;
1246 bool LHSUniform = true;
1247 bool RHSUniform = true;
1248 for (unsigned i = 0; i < VWidth; i++) {
1249 unsigned MaskVal = Shuffle->getMaskValue(i);
1250 if (MaskVal == -1u) {
1251 UndefElts.setBit(i);
1252 } else if (!DemandedElts[i]) {
1253 NewUndefElts = true;
1254 UndefElts.setBit(i);
1255 } else if (MaskVal < LHSVWidth) {
1256 if (LHSUndefElts[MaskVal]) {
1257 NewUndefElts = true;
1258 UndefElts.setBit(i);
1260 LHSIdx = LHSIdx == -1u ? i : LHSVWidth;
1261 LHSValIdx = LHSValIdx == -1u ? MaskVal : LHSVWidth;
1262 LHSUniform = LHSUniform && (MaskVal == i);
1265 if (RHSUndefElts[MaskVal - LHSVWidth]) {
1266 NewUndefElts = true;
1267 UndefElts.setBit(i);
1269 RHSIdx = RHSIdx == -1u ? i : LHSVWidth;
1270 RHSValIdx = RHSValIdx == -1u ? MaskVal - LHSVWidth : LHSVWidth;
1271 RHSUniform = RHSUniform && (MaskVal - LHSVWidth == i);
1276 // Try to transform shuffle with constant vector and single element from
1277 // this constant vector to single insertelement instruction.
1278 // shufflevector V, C, <v1, v2, .., ci, .., vm> ->
1279 // insertelement V, C[ci], ci-n
1280 if (LHSVWidth == Shuffle->getType()->getNumElements()) {
1281 Value *Op = nullptr;
1282 Constant *Value = nullptr;
1285 // Find constant vector with the single element in shuffle (LHS or RHS).
1286 if (LHSIdx < LHSVWidth && RHSUniform) {
1287 if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(0))) {
1288 Op = Shuffle->getOperand(1);
1289 Value = CV->getOperand(LHSValIdx);
1293 if (RHSIdx < LHSVWidth && LHSUniform) {
1294 if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(1))) {
1295 Op = Shuffle->getOperand(0);
1296 Value = CV->getOperand(RHSValIdx);
1300 // Found constant vector with single element - convert to insertelement.
1302 Instruction *New = InsertElementInst::Create(
1303 Op, Value, ConstantInt::get(Type::getInt32Ty(I->getContext()), Idx),
1304 Shuffle->getName());
1305 InsertNewInstWith(New, *Shuffle);
1310 // Add additional discovered undefs.
1311 SmallVector<Constant*, 16> Elts;
1312 for (unsigned i = 0; i < VWidth; ++i) {
1314 Elts.push_back(UndefValue::get(Type::getInt32Ty(I->getContext())));
1316 Elts.push_back(ConstantInt::get(Type::getInt32Ty(I->getContext()),
1317 Shuffle->getMaskValue(i)));
1319 I->setOperand(2, ConstantVector::get(Elts));
1324 case Instruction::Select: {
1325 // If this is a vector select, try to transform the select condition based
1326 // on the current demanded elements.
1327 SelectInst *Sel = cast<SelectInst>(I);
1328 if (Sel->getCondition()->getType()->isVectorTy()) {
1329 // TODO: We are not doing anything with UndefElts based on this call.
1330 // It is overwritten below based on the other select operands. If an
1331 // element of the select condition is known undef, then we are free to
1332 // choose the output value from either arm of the select. If we know that
1333 // one of those values is undef, then the output can be undef.
1334 simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
1337 // Next, see if we can transform the arms of the select.
1338 APInt DemandedLHS(DemandedElts), DemandedRHS(DemandedElts);
1339 if (auto *CV = dyn_cast<ConstantVector>(Sel->getCondition())) {
1340 for (unsigned i = 0; i < VWidth; i++) {
1341 // isNullValue() always returns false when called on a ConstantExpr.
1342 // Skip constant expressions to avoid propagating incorrect information.
1343 Constant *CElt = CV->getAggregateElement(i);
1344 if (isa<ConstantExpr>(CElt))
1346 // TODO: If a select condition element is undef, we can demand from
1347 // either side. If one side is known undef, choosing that side would propagate undef.
1349 if (CElt->isNullValue())
1350 DemandedLHS.clearBit(i);
1352 DemandedRHS.clearBit(i);
1356 simplifyAndSetOp(I, 1, DemandedLHS, UndefElts2);
1357 simplifyAndSetOp(I, 2, DemandedRHS, UndefElts3);
1359 // Output elements are undefined if the element from each arm is undefined.
1360 // TODO: This can be improved. See comment in select condition handling.
1361 UndefElts = UndefElts2 & UndefElts3;
1364 case Instruction::BitCast: {
1365 // Vector->vector casts only.
1366 VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
1368 unsigned InVWidth = VTy->getNumElements();
1369 APInt InputDemandedElts(InVWidth, 0);
1370 UndefElts2 = APInt(InVWidth, 0);
1373 if (VWidth == InVWidth) {
1374 // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
1375 // elements as are demanded of us.
1377 InputDemandedElts = DemandedElts;
1378 } else if ((VWidth % InVWidth) == 0) {
1379 // If the number of elements in the output is a multiple of the number of
1380 // elements in the input then an input element is live if any of the
1381 // corresponding output elements are live.
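// e.g. bitcast <2 x i64> to <4 x i32> (Ratio = 2): demanding output element
// 2 or 3 demands input element 1. (Illustrative types.)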
1382 Ratio = VWidth / InVWidth;
1383 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1384 if (DemandedElts[OutIdx])
1385 InputDemandedElts.setBit(OutIdx / Ratio);
1386 } else if ((InVWidth % VWidth) == 0) {
1387 // If the number of elements in the input is a multiple of the number of
1388 // elements in the output then an input element is live if the
1389 // corresponding output element is live.
1390 Ratio = InVWidth / VWidth;
1391 for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1392 if (DemandedElts[InIdx / Ratio])
1393 InputDemandedElts.setBit(InIdx);
1395 // Unsupported so far.
1399 simplifyAndSetOp(I, 0, InputDemandedElts, UndefElts2);
1401 if (VWidth == InVWidth) {
1402 UndefElts = UndefElts2;
1403 } else if ((VWidth % InVWidth) == 0) {
1404 // If the number of elements in the output is a multiple of the number of
1405 // elements in the input then an output element is undef if the
1406 // corresponding input element is undef.
1407 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1408 if (UndefElts2[OutIdx / Ratio])
1409 UndefElts.setBit(OutIdx);
1410 } else if ((InVWidth % VWidth) == 0) {
1411 // If the number of elements in the input is a multiple of the number of
1412 // elements in the output then an output element is undef if all of the
1413 // corresponding input elements are undef.
1414 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
1415 APInt SubUndef = UndefElts2.lshr(OutIdx * Ratio).zextOrTrunc(Ratio);
1416 if (SubUndef.countPopulation() == Ratio)
1417 UndefElts.setBit(OutIdx);
1420 llvm_unreachable("Unimp");
1424 case Instruction::FPTrunc:
1425 case Instruction::FPExt:
1426 simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
1429 case Instruction::Call: {
1430 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1432 switch (II->getIntrinsicID()) {
1433 case Intrinsic::x86_xop_vfrcz_ss:
1434 case Intrinsic::x86_xop_vfrcz_sd:
1435 // The instructions for these intrinsics are specified to zero the upper bits, not
1436 // pass them through like other scalar intrinsics. So we shouldn't just
1437 // use Arg0 if DemandedElts[0] is clear like we do for other intrinsics.
1438 // Instead we should return a zero vector.
1439 if (!DemandedElts[0]) {
1441 return ConstantAggregateZero::get(II->getType());
1444 // Only the lower element is used.
1446 simplifyAndSetOp(II, 0, DemandedElts, UndefElts);
1448 // Only the lower element is undefined. The high elements are zero.
1449 UndefElts = UndefElts[0];
1452 // Unary scalar-as-vector operations that work column-wise.
1453 case Intrinsic::x86_sse_rcp_ss:
1454 case Intrinsic::x86_sse_rsqrt_ss:
1455 simplifyAndSetOp(II, 0, DemandedElts, UndefElts);
1457 // If lowest element of a scalar op isn't used then use Arg0.
1458 if (!DemandedElts[0]) {
1460 return II->getArgOperand(0);
1462 // TODO: If only low elt is used, lower SQRT to FSQRT (with rounding/exceptions checks).
1466 // Binary scalar-as-vector operations that work column-wise. The high
1467 // elements come from operand 0. The low element is a function of both operands.
1469 case Intrinsic::x86_sse_min_ss:
1470 case Intrinsic::x86_sse_max_ss:
1471 case Intrinsic::x86_sse_cmp_ss:
1472 case Intrinsic::x86_sse2_min_sd:
1473 case Intrinsic::x86_sse2_max_sd:
1474 case Intrinsic::x86_sse2_cmp_sd: {
1475 simplifyAndSetOp(II, 0, DemandedElts, UndefElts);
1477 // If lowest element of a scalar op isn't used then use Arg0.
1478 if (!DemandedElts[0]) {
1480 return II->getArgOperand(0);
1483 // Only lower element is used for operand 1.
1485 simplifyAndSetOp(II, 1, DemandedElts, UndefElts2);
1487 // Lower element is undefined if both lower elements are undefined.
1488 // Consider things like undef&0. The result is known zero, not undef.
1490 UndefElts.clearBit(0);
1495 // Binary scalar-as-vector operations that work column-wise. The high
1496 // elements come from operand 0 and the low element comes from operand 1.
1497 case Intrinsic::x86_sse41_round_ss:
1498 case Intrinsic::x86_sse41_round_sd: {
1499 // Don't use the low element of operand 0.
1500 APInt DemandedElts2 = DemandedElts;
1501 DemandedElts2.clearBit(0);
1502 simplifyAndSetOp(II, 0, DemandedElts2, UndefElts);
1504 // If lowest element of a scalar op isn't used then use Arg0.
1505 if (!DemandedElts[0]) {
1507 return II->getArgOperand(0);
1510 // Only lower element is used for operand 1.
1512 simplifyAndSetOp(II, 1, DemandedElts, UndefElts2);
1514 // Take the high undef elements from operand 0 and take the lower element from operand 1.
1516 UndefElts.clearBit(0);
1517 UndefElts |= UndefElts2[0];
1521 // Three input scalar-as-vector operations that work column-wise. The high
1522 // elements come from operand 0 and the low element is a function of all three inputs.
1524 case Intrinsic::x86_avx512_mask_add_ss_round:
1525 case Intrinsic::x86_avx512_mask_div_ss_round:
1526 case Intrinsic::x86_avx512_mask_mul_ss_round:
1527 case Intrinsic::x86_avx512_mask_sub_ss_round:
1528 case Intrinsic::x86_avx512_mask_max_ss_round:
1529 case Intrinsic::x86_avx512_mask_min_ss_round:
1530 case Intrinsic::x86_avx512_mask_add_sd_round:
1531 case Intrinsic::x86_avx512_mask_div_sd_round:
1532 case Intrinsic::x86_avx512_mask_mul_sd_round:
1533 case Intrinsic::x86_avx512_mask_sub_sd_round:
1534 case Intrinsic::x86_avx512_mask_max_sd_round:
1535 case Intrinsic::x86_avx512_mask_min_sd_round:
1536 simplifyAndSetOp(II, 0, DemandedElts, UndefElts);
1538 // If lowest element of a scalar op isn't used then use Arg0.
1539 if (!DemandedElts[0]) {
1541 return II->getArgOperand(0);
1544 // Only the lower element is used for operands 1 and 2.
1546 simplifyAndSetOp(II, 1, DemandedElts, UndefElts2);
1547 simplifyAndSetOp(II, 2, DemandedElts, UndefElts3);
1549 // Lower element is undefined if all three lower elements are undefined.
1550 // Consider things like undef&0. The result is known zero, not undef.
1551 if (!UndefElts2[0] || !UndefElts3[0])
1552 UndefElts.clearBit(0);
1556 case Intrinsic::x86_sse2_packssdw_128:
1557 case Intrinsic::x86_sse2_packsswb_128:
1558 case Intrinsic::x86_sse2_packuswb_128:
1559 case Intrinsic::x86_sse41_packusdw:
1560 case Intrinsic::x86_avx2_packssdw:
1561 case Intrinsic::x86_avx2_packsswb:
1562 case Intrinsic::x86_avx2_packusdw:
1563 case Intrinsic::x86_avx2_packuswb:
1564 case Intrinsic::x86_avx512_packssdw_512:
1565 case Intrinsic::x86_avx512_packsswb_512:
1566 case Intrinsic::x86_avx512_packusdw_512:
1567 case Intrinsic::x86_avx512_packuswb_512: {
1568 auto *Ty0 = II->getArgOperand(0)->getType();
1569 unsigned InnerVWidth = Ty0->getVectorNumElements();
1570 assert(VWidth == (InnerVWidth * 2) && "Unexpected input size");
1572 unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
1573 unsigned VWidthPerLane = VWidth / NumLanes;
1574 unsigned InnerVWidthPerLane = InnerVWidth / NumLanes;
1576 // Per lane, pack the elements of the first input and then the second.
1578 // v8i16 PACK(v4i32 X, v4i32 Y) - (X[0..3],Y[0..3])
1579 // v32i8 PACK(v16i16 X, v16i16 Y) - (X[0..7],Y[0..7]),(X[8..15],Y[8..15])
1580 for (int OpNum = 0; OpNum != 2; ++OpNum) {
1581 APInt OpDemandedElts(InnerVWidth, 0);
1582 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1583 unsigned LaneIdx = Lane * VWidthPerLane;
1584 for (unsigned Elt = 0; Elt != InnerVWidthPerLane; ++Elt) {
1585 unsigned Idx = LaneIdx + Elt + InnerVWidthPerLane * OpNum;
1586 if (DemandedElts[Idx])
1587 OpDemandedElts.setBit((Lane * InnerVWidthPerLane) + Elt);
1591 // Demand elements from the operand.
1592 APInt OpUndefElts(InnerVWidth, 0);
1593 simplifyAndSetOp(II, OpNum, OpDemandedElts, OpUndefElts);
1595 // Pack the operand's UNDEF elements, one lane at a time.
1596 OpUndefElts = OpUndefElts.zext(VWidth);
1597 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1598 APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
1599 LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
1600 LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);
1601 UndefElts |= LaneElts;
1608 case Intrinsic::x86_ssse3_pshuf_b_128:
1609 case Intrinsic::x86_avx2_pshuf_b:
1610 case Intrinsic::x86_avx512_pshuf_b_512:
1612 case Intrinsic::x86_avx_vpermilvar_ps:
1613 case Intrinsic::x86_avx_vpermilvar_ps_256:
1614 case Intrinsic::x86_avx512_vpermilvar_ps_512:
1615 case Intrinsic::x86_avx_vpermilvar_pd:
1616 case Intrinsic::x86_avx_vpermilvar_pd_256:
1617 case Intrinsic::x86_avx512_vpermilvar_pd_512:
1619 case Intrinsic::x86_avx2_permd:
1620 case Intrinsic::x86_avx2_permps: {
1621 simplifyAndSetOp(II, 1, DemandedElts, UndefElts);
1625 // SSE4A instructions leave the upper 64-bits of the 128-bit result
1626 // in an undefined state.
1627 case Intrinsic::x86_sse4a_extrq:
1628 case Intrinsic::x86_sse4a_extrqi:
1629 case Intrinsic::x86_sse4a_insertq:
1630 case Intrinsic::x86_sse4a_insertqi:
1631 UndefElts.setHighBits(VWidth / 2);
1633 case Intrinsic::amdgcn_buffer_load:
1634 case Intrinsic::amdgcn_buffer_load_format:
1635 case Intrinsic::amdgcn_raw_buffer_load:
1636 case Intrinsic::amdgcn_raw_buffer_load_format:
1637 case Intrinsic::amdgcn_struct_buffer_load:
1638 case Intrinsic::amdgcn_struct_buffer_load_format:
1639 return simplifyAMDGCNMemoryIntrinsicDemanded(II, DemandedElts);
1641 if (getAMDGPUImageDMaskIntrinsic(II->getIntrinsicID()))
1642 return simplifyAMDGCNMemoryIntrinsicDemanded(
1643 II, DemandedElts, 0, II->getNumArgOperands() - 2);
1647 } // switch on IntrinsicID
1650 } // switch on Opcode
1652 // TODO: We bail completely on integer div/rem and shifts because they have
1653 // UB/poison potential, but that should be refined.
1655 if (match(I, m_BinOp(BO)) && !BO->isIntDivRem() && !BO->isShift()) {
1656 simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
1657 simplifyAndSetOp(I, 1, DemandedElts, UndefElts2);
1659 // Any change to an instruction with potential poison must clear those flags
1660 // because we cannot guarantee those constraints now. Other analyses may
1661 // determine that it is safe to re-apply the flags.
1663 BO->dropPoisonGeneratingFlags();
1665 // Output elements are undefined if both are undefined. Consider things
1666 // like undef & 0. The result is known zero, not undef.
1667 UndefElts &= UndefElts2;
1670 return MadeChange ? I : nullptr;