//===- InstCombineSimplifyDemanded.cpp ------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains logic for simplifying instructions based on information
// about how they are used.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"
/// Check to see if the specified operand of the specified instruction is a
/// constant integer. If so, check to see if there are any bits set in the
/// constant that are not demanded. If so, shrink the constant and return true.
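/// For example, if the instruction is `and i32 %x, 255` but only the low four
/// bits of the result are demanded, the constant is shrunk to 15.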
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
                                   const APInt &Demanded) {
  assert(I && "No instruction?");
  assert(OpNo < I->getNumOperands() && "Operand index too large");

  // The operand must be a constant integer or splat integer.
  Value *Op = I->getOperand(OpNo);
  const APInt *C;
  if (!match(Op, m_APInt(C)))
    return false;

  // If there are no bits set that aren't demanded, nothing to do.
  if (C->isSubsetOf(Demanded))
    return false;

  // This instruction is producing bits that are not demanded. Shrink the RHS.
  I->setOperand(OpNo, ConstantInt::get(Op->getType(), *C & Demanded));

  return true;
}
/// Inst is an integer instruction that SimplifyDemandedBits knows about. See if
/// the instruction has any properties that allow us to simplify its operands.
bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
  unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
  KnownBits Known(BitWidth);
  APInt DemandedMask(APInt::getAllOnesValue(BitWidth));

  Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, Known,
                                     0, &Inst);
  if (!V) return false;
  if (V == &Inst) return true;
  replaceInstUsesWith(Inst, V);
  return true;
}
/// This form of SimplifyDemandedBits simplifies the specified instruction
/// operand if possible, updating it in place. It returns true if it made any
/// change and false otherwise.
bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                        const APInt &DemandedMask,
                                        KnownBits &Known,
                                        unsigned Depth) {
  Use &U = I->getOperandUse(OpNo);
  Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, Known,
                                          Depth, I);
  if (!NewVal) return false;
  U = NewVal;
  return true;
}
/// This function attempts to replace V with a simpler value based on the
/// demanded bits. When this function is called, it is known that only the bits
/// set in DemandedMask of the result of V are ever used downstream.
/// Consequently, depending on the mask and V, it may be possible to replace V
/// with a constant or one of its operands. In such cases, this function does
/// the replacement and returns the simpler value. In all other cases, it
/// analyzes the expression, setting Known.One to the bits known to be one and
/// Known.Zero to the bits known to be zero in the expression. These are
/// provided to potentially allow the caller (which might recursively be
/// SimplifyDemandedBits itself) to simplify the expression.
/// Known.One and Known.Zero always follow the invariant that:
///   Known.One & Known.Zero == 0.
/// That is, a bit can't be both 1 and 0. Note that the bits in Known.One and
/// Known.Zero may only be accurate for those bits set in DemandedMask. Note
/// also that the bitwidth of V, DemandedMask, Known.Zero and Known.One must
/// all be the same.
///
/// This returns null if it did not change anything and it permits no
/// simplification. This returns V itself if it did some simplification of V's
/// operands based on the information about what bits are demanded. This
/// returns some other non-null value if it found out that V is equal to
/// another value in the context where the specified bits are demanded, but not
/// for all users.
Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                             KnownBits &Known, unsigned Depth,
                                             Instruction *CxtI) {
  assert(V != nullptr && "Null pointer of Value???");
  assert(Depth <= 6 && "Limit Search Depth");
  uint32_t BitWidth = DemandedMask.getBitWidth();
  Type *VTy = V->getType();
  assert(
      (!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
      Known.getBitWidth() == BitWidth &&
      "Value *V, DemandedMask and Known must have same BitWidth");

  if (isa<Constant>(V)) {
    computeKnownBits(V, Known, Depth, CxtI);
    return nullptr;
  }

  Known.resetAll();
  if (DemandedMask.isNullValue()) // Not demanding any bits from V.
    return UndefValue::get(VTy);

  if (Depth == 6) // Limit search depth.
    return nullptr;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) {
    computeKnownBits(V, Known, Depth, CxtI);
    return nullptr; // Only analyze instructions.
  }
  // If there are multiple uses of this value and we aren't at the root, then
  // we can't do any simplifications of the operands, because DemandedMask
  // only reflects the bits demanded by *one* of the users.
  if (Depth != 0 && !I->hasOneUse())
    return SimplifyMultipleUseDemandedBits(I, DemandedMask, Known, Depth, CxtI);

  KnownBits LHSKnown(BitWidth), RHSKnown(BitWidth);

  // If this is the root being simplified, allow it to have multiple uses,
  // just set the DemandedMask to all bits so that we can try to simplify the
  // operands. This allows visitTruncInst (for example) to simplify the
  // operand of a trunc without duplicating all the logic below.
  if (Depth == 0 && !V->hasOneUse())
    DemandedMask.setAllBits();
  switch (I->getOpcode()) {
  default:
    computeKnownBits(I, Known, Depth, CxtI);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.Zero, LHSKnown,
                             Depth + 1))
      return I;
    assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    // Output known-0 bits are known to be clear if clear in either the LHS or
    // the RHS.
    APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
    // Output known-1 bits are only known if set in both the LHS and RHS.
    APInt IKnownOne = RHSKnown.One & LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero | IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnown.Zero))
      return I;

    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Or: {
    // If either the LHS or the RHS are One, the result is One.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.One, LHSKnown,
                             Depth + 1))
      return I;
    assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS and RHS.
    APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
    // Output known-1 bits are known to be set if set in either the LHS or the
    // RHS.
    APInt IKnownOne = RHSKnown.One | LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero | IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Xor: {
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Depth + 1))
      return I;
    assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS and RHS.
    APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
                       (RHSKnown.One & LHSKnown.One);
    // Output known-1 bits are known to be set if set in only one of the LHS
    // and RHS.
    APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
                      (RHSKnown.One & LHSKnown.Zero);

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero | IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if (DemandedMask.isSubsetOf(RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);

    // If all of the demanded bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero)) {
      Instruction *Or =
          BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
                                   I->getName() + ".or");
      return InsertNewInstWith(Or, *I);
    }

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | RHSKnown.One) &&
        RHSKnown.One.isSubsetOf(LHSKnown.One)) {
      Constant *AndC = Constant::getIntegerValue(VTy,
                                                 ~RHSKnown.One & DemandedMask);
      Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
      return InsertNewInstWith(And, *I);
    }

    // If the RHS is a constant, see if we can simplify it.
    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    // If our LHS is an 'and' and if it has one use, and if any of the bits we
    // are flipping are known to be set, then the xor is just resetting those
    // bits to zero. We can just knock out bits from the 'and' and the 'xor',
    // simplifying both of them.
    if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0)))
      if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
          isa<ConstantInt>(I->getOperand(1)) &&
          isa<ConstantInt>(LHSInst->getOperand(1)) &&
          (LHSKnown.One & RHSKnown.One & DemandedMask) != 0) {
        ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
        ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
        APInt NewMask = ~(LHSKnown.One & RHSKnown.One & DemandedMask);

        Constant *AndC =
            ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
        Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
        InsertNewInstWith(NewAnd, *I);

        Constant *XorC =
            ConstantInt::get(I->getType(), NewMask & XorRHS->getValue());
        Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
        return InsertNewInstWith(NewXor, *I);
      }

    // Output known-0 bits are known if clear or set in both the LHS and RHS.
    Known.Zero = std::move(IKnownZero);
    // Output known-1 bits are known to be set if set in only one of the LHS
    // and RHS.
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Select:
    // If this is a select as part of a min/max pattern, don't simplify any
    // further in case we break the structure.
    Value *LHS, *RHS;
    if (matchSelectPattern(I, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

    if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Depth + 1))
      return I;
    assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
        ShrinkDemandedConstant(I, 2, DemandedMask))
      return I;

    // Only known if known in both the LHS and RHS.
    Known.One = RHSKnown.One & LHSKnown.One;
    Known.Zero = RHSKnown.Zero & LHSKnown.Zero;
    break;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    APInt InputDemandedMask = DemandedMask.zextOrTrunc(SrcBitWidth);
    KnownBits InputKnown(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, InputDemandedMask, InputKnown, Depth + 1))
      return I;
    Known = InputKnown.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    break;
  }
  case Instruction::BitCast:
    if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
      return nullptr; // vector->int or fp->int?

    if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
      if (VectorType *SrcVTy =
              dyn_cast<VectorType>(I->getOperand(0)->getType())) {
        if (DstVTy->getNumElements() != SrcVTy->getNumElements())
          // Don't touch a bitcast between vectors of different element counts.
          return nullptr;
      } else
        // Don't touch a scalar-to-vector bitcast.
        return nullptr;
    } else if (I->getOperand(0)->getType()->isVectorTy())
      // Don't touch a vector-to-scalar bitcast.
      return nullptr;

    if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
      return I;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    break;
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    APInt InputDemandedBits = DemandedMask.trunc(SrcBitWidth);

    // If any of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if (DemandedMask.getActiveBits() > SrcBitWidth)
      InputDemandedBits.setBit(SrcBitWidth - 1);

    KnownBits InputKnown(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, InputDemandedBits, InputKnown, Depth + 1))
      return I;

    // If the input sign bit is known zero, or if the NewBits are not demanded,
    // convert this into a zero extension.
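    // e.g. for (sext i8 %x to i32), if only bits 0..7 of the result are
    // demanded, the sign-extended copies of the sign bit are irrelevant and a
    // zext computes the same demanded bits.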
    if (InputKnown.isNonNegative() ||
        DemandedMask.getActiveBits() <= SrcBitWidth) {
      // Convert to ZExt cast.
      CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
      return InsertNewInstWith(NewCast, *I);
    }

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = InputKnown.sext(BitWidth);
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    break;
  }
  case Instruction::Add:
  case Instruction::Sub: {
    // If the high bits of an ADD/SUB are not demanded, then we do not care
    // about the high bits of the operands.
    unsigned NLZ = DemandedMask.countLeadingZeros();

    // Right fill the mask of bits for this ADD/SUB to demand the most
    // significant bit and all those below it.
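    // e.g. with BitWidth == 32 and DemandedMask == 0x0000FF00, NLZ is 16 and
    // DemandedFromOps becomes 0x0000FFFF: carries only propagate upward, so
    // operand bits above the highest demanded bit cannot matter.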
    APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth - NLZ));
    if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
        SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Depth + 1) ||
        ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
        SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Depth + 1)) {
      // Disable the nsw and nuw flags here: We can no longer guarantee that
      // we won't wrap after simplification. Removing the nsw/nuw flags is
      // legal here because the top bit is not demanded.
      BinaryOperator &BinOP = *cast<BinaryOperator>(I);
      BinOP.setHasNoSignedWrap(false);
      BinOP.setHasNoUnsignedWrap(false);
      return I;
    }

    // If we are known to be adding/subtracting zeros to every bit below
    // the highest demanded bit, we just return the other side.
    if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
      return I->getOperand(0);
    // We can't do this with the LHS for subtraction.
    if (I->getOpcode() == Instruction::Add &&
        DemandedFromOps.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);

    // Otherwise just hand the add/sub off to computeKnownBits to fill in
    // the known zeros and ones.
    computeKnownBits(V, Known, Depth, CxtI);
    break;
  }
  case Instruction::Shl: {
    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      const APInt *ShrAmt;
      if (match(I->getOperand(0), m_Shr(m_Value(), m_APInt(ShrAmt)))) {
        Instruction *Shr = cast<Instruction>(I->getOperand(0));
        if (Value *R = simplifyShrShlDemandedBits(
                Shr, *ShrAmt, I, *SA, DemandedMask, Known))
          return R;
      }

      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth - 1);
      APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));

      // If the shift is NUW/NSW, then it does demand the high bits.
      ShlOperator *IOp = cast<ShlOperator>(I);
      if (IOp->hasNoSignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt + 1);
      else if (IOp->hasNoUnsignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
        return I;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero <<= ShiftAmt;
      Known.One <<= ShiftAmt;
      // Low bits are known zero.
      if (ShiftAmt)
        Known.Zero.setLowBits(ShiftAmt);
    }
    break;
  }
  case Instruction::LShr: {
    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth - 1);

      // Unsigned shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (cast<LShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
        return I;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShiftAmt);
      Known.One.lshrInPlace(ShiftAmt);
      if (ShiftAmt)
        Known.Zero.setHighBits(ShiftAmt); // High bits are known zero.
    }
    break;
  }
  case Instruction::AShr: {
    // If this is an arithmetic shift right and only the low-bit is set, we can
    // always convert this into a logical shr, even if the shift amount is
    // variable. The low bit of the shift cannot be an input sign bit unless
    // the shift amount is >= the size of the datatype, which is undefined.
    if (DemandedMask.isOneValue()) {
      // Perform the logical shift right.
      Instruction *NewVal = BinaryOperator::CreateLShr(
          I->getOperand(0), I->getOperand(1), I->getName());
      return InsertNewInstWith(NewVal, *I);
    }

    // If the sign bit is the only bit demanded by this ashr, then there is no
    // need to do it, the shift doesn't change the high bit.
    if (DemandedMask.isSignMask())
      return I->getOperand(0);

    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      uint32_t ShiftAmt = SA->getLimitedValue(BitWidth - 1);

      // Signed shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
      // If any of the high bits are demanded, we should set the sign bit as
      // demanded.
      if (DemandedMask.countLeadingZeros() <= ShiftAmt)
        DemandedMaskIn.setSignBit();

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (cast<AShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
        return I;

      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      // Compute the new bits that are at the top now.
      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      Known.Zero.lshrInPlace(ShiftAmt);
      Known.One.lshrInPlace(ShiftAmt);

      // Handle the sign bits.
      APInt SignMask(APInt::getSignMask(BitWidth));
      // Adjust to where it is now in the mask.
      SignMask.lshrInPlace(ShiftAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
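      // (Below the vacated high bits, ashr and lshr produce identical bits,
      // so this is safe whenever HighBits is not demanded.)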
      if (BitWidth <= ShiftAmt || Known.Zero[BitWidth - ShiftAmt - 1] ||
          !DemandedMask.intersects(HighBits)) {
        BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0),
                                                          I->getOperand(1));
        LShr->setIsExact(cast<BinaryOperator>(I)->isExact());
        return InsertNewInstWith(LShr, *I);
      } else if (Known.One.intersects(SignMask)) { // New bits are known one.
        Known.One |= HighBits;
      }
    }
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // X % -1 demands all the bits because we don't want to introduce
      // INT_MIN % -1 (== undef) by accident.
      if (Rem->isAllOnesValue())
        break;
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        if (DemandedMask.ult(RA)) // srem won't affect demanded bits
          return I->getOperand(0);

        APInt LowBits = RA - 1;
        APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
        if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Depth + 1))
          return I;

        // The low bits of LHS are unchanged by the srem.
        Known.Zero = LHSKnown.Zero & LowBits;
        Known.One = LHSKnown.One & LowBits;

        // If LHS is non-negative or has all low bits zero, then the upper
        // bits are all zero.
        if (LHSKnown.isNonNegative() || LowBits.isSubsetOf(LHSKnown.Zero))
          Known.Zero |= ~LowBits;

        // If LHS is negative and not all low bits are zero, then the upper
        // bits are all one.
        if (LHSKnown.isNegative() && LowBits.intersects(LHSKnown.One))
          Known.One |= ~LowBits;

        assert(!Known.hasConflict() && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (DemandedMask.isSignBitSet()) {
      computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnown.isNonNegative())
        Known.makeNonNegative();
    }
    break;
  case Instruction::URem: {
    KnownBits Known2(BitWidth);
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    if (SimplifyDemandedBits(I, 0, AllOnes, Known2, Depth + 1) ||
        SimplifyDemandedBits(I, 1, AllOnes, Known2, Depth + 1))
      return I;
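    // The result of urem is always strictly smaller than the RHS (urem by
    // zero is UB), so it has at least as many leading zero bits as Known2
    // now reports for operand 1.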
    unsigned Leaders = Known2.countMinLeadingZeros();
    Known.Zero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap: {
        // If the only bits demanded come from one byte of the bswap result,
        // just shift the input byte into position to eliminate the bswap.
        unsigned NLZ = DemandedMask.countLeadingZeros();
        unsigned NTZ = DemandedMask.countTrailingZeros();

        // Round NTZ down to the next byte. If we have 11 trailing zeros, then
        // we need all the bits down to bit 8. Likewise, round NLZ. If we
        // have 14 leading zeros, round to 8.
        NLZ &= ~7;
        NTZ &= ~7;
        // If we need exactly one byte, we can do this transformation.
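        // e.g. for a 32-bit bswap where only bits 8..15 are demanded
        // (NTZ == 8, NLZ == 16), the demanded byte comes from input bits
        // 16..23, so a single lshr by 8 replaces the bswap.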
        if (BitWidth - NLZ - NTZ == 8) {
          unsigned ResultBit = NTZ;
          unsigned InputBit = BitWidth - NTZ - 8;

          // Replace this with either a left or right shift to get the byte
          // into the right place.
          Instruction *NewVal;
          if (InputBit > ResultBit)
            NewVal = BinaryOperator::CreateLShr(II->getArgOperand(0),
                     ConstantInt::get(I->getType(), InputBit - ResultBit));
          else
            NewVal = BinaryOperator::CreateShl(II->getArgOperand(0),
                     ConstantInt::get(I->getType(), ResultBit - InputBit));
          NewVal->takeName(I);
          return InsertNewInstWith(NewVal, *I);
        }

        // TODO: Could compute known zero/one bits based on the input.
        break;
      }
      case Intrinsic::x86_mmx_pmovmskb:
      case Intrinsic::x86_sse_movmsk_ps:
      case Intrinsic::x86_sse2_movmsk_pd:
      case Intrinsic::x86_sse2_pmovmskb_128:
      case Intrinsic::x86_avx_movmsk_ps_256:
      case Intrinsic::x86_avx_movmsk_pd_256:
      case Intrinsic::x86_avx2_pmovmskb: {
        // MOVMSK copies the vector elements' sign bits to the low bits
        // and zeros the high bits.
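        // e.g. movmsk.ps on <4 x float> packs the four sign bits into bits
        // 0..3 of the i32 result; bits 4..31 are always zero.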
        unsigned ArgWidth;
        if (II->getIntrinsicID() == Intrinsic::x86_mmx_pmovmskb) {
          ArgWidth = 8; // Arg is x86_mmx, but treated as <8 x i8>.
        } else {
          auto Arg = II->getArgOperand(0);
          auto ArgType = cast<VectorType>(Arg->getType());
          ArgWidth = ArgType->getNumElements();
        }

        // If we don't need any of the low bits then return zero; we know
        // that DemandedMask is non-zero already.
        APInt DemandedElts = DemandedMask.zextOrTrunc(ArgWidth);
        if (DemandedElts.isNullValue())
          return ConstantInt::getNullValue(VTy);

        // We know that the upper bits are set to zero.
        Known.Zero.setBitsFrom(ArgWidth);
        return nullptr;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        return nullptr;
      }
    }
    computeKnownBits(V, Known, Depth, CxtI);
    break;
  }

  // If the client is only demanding bits that we know, return the known
  // constant.
  if (DemandedMask.isSubsetOf(Known.Zero | Known.One))
    return Constant::getIntegerValue(VTy, Known.One);

  return nullptr;
}
/// Helper routine of SimplifyDemandedUseBits. It computes Known
/// bits. It also tries to handle simplifications that can be done based on
/// DemandedMask, but without modifying the Instruction.
Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
                                                     const APInt &DemandedMask,
                                                     KnownBits &Known,
                                                     unsigned Depth,
                                                     Instruction *CxtI) {
  unsigned BitWidth = DemandedMask.getBitWidth();
  Type *ITy = I->getType();

  KnownBits LHSKnown(BitWidth);
  KnownBits RHSKnown(BitWidth);
  // Despite the fact that we can't simplify this instruction in all users'
  // contexts, we can at least compute the known bits, and we can
  // do simplifications that apply to *just* the one user if we know that
  // this instruction has a simpler value in that context.
  switch (I->getOpcode()) {
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);

    // Output known-0 bits are known to be clear if clear in either the LHS or
    // the RHS.
    APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
    // Output known-1 bits are only known if set in both the LHS and RHS.
    APInt IKnownOne = RHSKnown.One & LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero | IKnownOne))
      return Constant::getIntegerValue(ITy, IKnownOne);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return I->getOperand(1);

    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Or: {
    // We can simplify (X|Y) -> X or Y in the user's context if we know that
    // only bits from X or Y are demanded.

    // If either the LHS or the RHS are One, the result is One.
    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);

    // Output known-0 bits are only known if clear in both the LHS and RHS.
    APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
    // Output known-1 bits are known to be set if set in either the LHS or
    // the RHS.
    APInt IKnownOne = RHSKnown.One | LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero | IKnownOne))
      return Constant::getIntegerValue(ITy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return I->getOperand(1);

    Known.Zero = std::move(IKnownZero);
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Xor: {
    // We can simplify (X^Y) -> X or Y in the user's context if we know that
    // only bits from X or Y are demanded.
    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);

    // Output known-0 bits are known if clear or set in both the LHS and RHS.
    APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
                       (RHSKnown.One & LHSKnown.One);
    // Output known-1 bits are known to be set if set in only one of the LHS
    // and RHS.
    APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
                      (RHSKnown.One & LHSKnown.Zero);

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero | IKnownOne))
      return Constant::getIntegerValue(ITy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedMask.isSubsetOf(RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);

    // Output known-0 bits are known if clear or set in both the LHS and RHS.
    Known.Zero = std::move(IKnownZero);
    // Output known-1 bits are known to be set if set in only one of the LHS
    // and RHS.
    Known.One = std::move(IKnownOne);
    break;
  }
  default:
    // Compute the Known bits to simplify things downstream.
    computeKnownBits(I, Known, Depth, CxtI);

    // If this user is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(Known.Zero | Known.One))
      return Constant::getIntegerValue(ITy, Known.One);

    return nullptr;
  }

  return nullptr;
}
/// Helper routine of SimplifyDemandedUseBits. It tries to simplify
/// "E1 = (X lsr C1) << C2", where C1 and C2 are constants, into
/// "E2 = X << (C2 - C1)" or "E2 = X >> (C1 - C2)", depending on the sign
/// of "C2 - C1".
///
/// Suppose E1 and E2 are generally different in bits S = {bm, bm+1, ...,
/// bn}, without considering the specific value X is holding.
/// This transformation is legal if one of the following conditions holds:
///  1) All the bits in S are 0, in which case E1 == E2.
///  2) We don't care about those bits in S, per the input DemandedMask.
///  3) Combination of 1) and 2): some bits in S are 0, and we don't care
///     about the rest.
///
/// Currently we only test condition 2).
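///
/// For example, with C1 = 3 and C2 = 5, E1 = (X lshr 3) shl 5 and
/// E2 = X shl 2 agree everywhere except bits 2..4 (S = {2, 3, 4}), so the
/// rewrite is legal whenever DemandedMask has bits 2..4 clear.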
///
/// As with SimplifyDemandedUseBits, it returns NULL if the simplification was
/// not successful.
Value *
InstCombiner::simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1,
                                         Instruction *Shl, const APInt &ShlOp1,
                                         const APInt &DemandedMask,
                                         KnownBits &Known) {
  if (!ShlOp1 || !ShrOp1)
    return nullptr; // No-op.

  Value *VarX = Shr->getOperand(0);
  Type *Ty = VarX->getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
    return nullptr; // Undef.

  unsigned ShlAmt = ShlOp1.getZExtValue();
  unsigned ShrAmt = ShrOp1.getZExtValue();

  Known.One.clearAllBits();
  Known.Zero.setLowBits(ShlAmt - 1);
  Known.Zero &= DemandedMask;

  APInt BitMask1(APInt::getAllOnesValue(BitWidth));
  APInt BitMask2(APInt::getAllOnesValue(BitWidth));

  bool isLshr = (Shr->getOpcode() == Instruction::LShr);
  BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
                      (BitMask1.ashr(ShrAmt) << ShlAmt);

  if (ShrAmt <= ShlAmt) {
    BitMask2 <<= (ShlAmt - ShrAmt);
  } else {
    BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt) :
                        BitMask2.ashr(ShrAmt - ShlAmt);
  }

  // Check if condition 2) (see the comment to this function) is satisfied.
  if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
    if (ShrAmt == ShlAmt)
      return VarX; // X with no shift.

    if (!Shr->hasOneUse())
      return nullptr;

    BinaryOperator *New;
    if (ShrAmt < ShlAmt) {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShlAmt - ShrAmt);
      New = BinaryOperator::CreateShl(VarX, Amt);
      BinaryOperator *Orig = cast<BinaryOperator>(Shl);
      New->setHasNoSignedWrap(Orig->hasNoSignedWrap());
      New->setHasNoUnsignedWrap(Orig->hasNoUnsignedWrap());
    } else {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShrAmt - ShlAmt);
      New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
                     BinaryOperator::CreateAShr(VarX, Amt);
      if (cast<BinaryOperator>(Shr)->isExact())
        New->setIsExact(true);
    }

    return InsertNewInstWith(New, *Shl);
  }

  return nullptr;
}
/// The specified value produces a vector with any number of elements.
/// DemandedElts contains the set of elements that are actually used by the
/// caller. This method analyzes which elements of the operand are undef and
/// returns that information in UndefElts.
///
/// If the information about demanded elements can be used to simplify the
/// operation, the operation is simplified and the resultant value is
/// returned. This returns null if no change was made.
Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                                APInt &UndefElts,
                                                unsigned Depth) {
  unsigned VWidth = V->getType()->getVectorNumElements();
  APInt EltMask(APInt::getAllOnesValue(VWidth));
  assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");

  if (isa<UndefValue>(V)) {
    // If the entire vector is undefined, just return this info.
    UndefElts = EltMask;
    return nullptr;
  }

  if (DemandedElts.isNullValue()) { // If nothing is demanded, provide undef.
    UndefElts = EltMask;
    return UndefValue::get(V->getType());
  }

  UndefElts = 0;

  // Handle ConstantAggregateZero, ConstantVector, ConstantDataSequential.
  if (Constant *C = dyn_cast<Constant>(V)) {
    // Check if this is identity. If so, return 0 since we are not simplifying
    // anything.
    if (DemandedElts.isAllOnesValue())
      return nullptr;

    Type *EltTy = cast<VectorType>(V->getType())->getElementType();
    Constant *Undef = UndefValue::get(EltTy);

    SmallVector<Constant*, 16> Elts;
    for (unsigned i = 0; i != VWidth; ++i) {
      if (!DemandedElts[i]) { // If not demanded, set to undef.
        Elts.push_back(Undef);
        UndefElts.setBit(i);
        continue;
      }

      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) return nullptr;

      if (isa<UndefValue>(Elt)) { // Already undef.
        Elts.push_back(Undef);
        UndefElts.setBit(i);
      } else { // Otherwise, defined.
        Elts.push_back(Elt);
      }
    }

    // If we changed the constant, return it.
    Constant *NewCV = ConstantVector::get(Elts);
    return NewCV != C ? NewCV : nullptr;
  }

  // Limit search depth.
  if (Depth == 10)
    return nullptr;

  // If multiple users are using the root value, proceed with
  // simplification conservatively assuming that all elements
  // are needed.
  if (!V->hasOneUse()) {
    // Quit if we find multiple users of a non-root value though.
    // They'll be handled when it's their turn to be visited by
    // the main instcombine process.
    if (Depth != 0)
      // TODO: Just compute the UndefElts information recursively.
      return nullptr;

    // Conservatively assume that all elements are needed.
    DemandedElts = EltMask;
  }

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr; // Only analyze instructions.

  bool MadeChange = false;
  APInt UndefElts2(VWidth, 0);
  APInt UndefElts3(VWidth, 0);
  Value *TmpV;
  switch (I->getOpcode()) {
  default: break;

  case Instruction::InsertElement: {
    // If this is a variable index, we don't know which element it overwrites,
    // so demand exactly the same input as we produce.
    ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
    if (!Idx) {
      // Note that we can't propagate undef elt info, because we don't know
      // which elt is getting updated.
      TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
      break;
    }

    // If this is inserting an element that isn't demanded, remove this
    // insertelement.
    unsigned IdxNo = Idx->getZExtValue();
    if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
      Worklist.Add(I);
      return I->getOperand(0);
    }

    // Otherwise, the element inserted overwrites whatever was there, so the
    // input demanded set is simpler than the output set.
    APInt DemandedElts2 = DemandedElts;
    DemandedElts2.clearBit(IdxNo);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
                                      UndefElts, Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }

    // The inserted element is defined.
    UndefElts.clearBit(IdxNo);
    break;
  }
  case Instruction::ShuffleVector: {
    ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
    unsigned LHSVWidth =
        Shuffle->getOperand(0)->getType()->getVectorNumElements();
    APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
    for (unsigned i = 0; i < VWidth; i++) {
      if (DemandedElts[i]) {
        unsigned MaskVal = Shuffle->getMaskValue(i);
        if (MaskVal != -1u) {
          assert(MaskVal < LHSVWidth * 2 &&
                 "shufflevector mask index out of range!");
          if (MaskVal < LHSVWidth)
            LeftDemanded.setBit(MaskVal);
          else
            RightDemanded.setBit(MaskVal - LHSVWidth);
        }
      }
    }

    APInt LHSUndefElts(LHSVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
                                      LHSUndefElts, Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }

    APInt RHSUndefElts(LHSVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
                                      RHSUndefElts, Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    bool NewUndefElts = false;
    unsigned LHSIdx = -1u, LHSValIdx = -1u;
    unsigned RHSIdx = -1u, RHSValIdx = -1u;
    bool LHSUniform = true;
    bool RHSUniform = true;
    for (unsigned i = 0; i < VWidth; i++) {
      unsigned MaskVal = Shuffle->getMaskValue(i);
      if (MaskVal == -1u) {
        UndefElts.setBit(i);
      } else if (!DemandedElts[i]) {
        NewUndefElts = true;
        UndefElts.setBit(i);
      } else if (MaskVal < LHSVWidth) {
        if (LHSUndefElts[MaskVal]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          LHSIdx = LHSIdx == -1u ? i : LHSVWidth;
          LHSValIdx = LHSValIdx == -1u ? MaskVal : LHSVWidth;
          LHSUniform = LHSUniform && (MaskVal == i);
        }
      } else {
        if (RHSUndefElts[MaskVal - LHSVWidth]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          RHSIdx = RHSIdx == -1u ? i : LHSVWidth;
          RHSValIdx = RHSValIdx == -1u ? MaskVal - LHSVWidth : LHSVWidth;
          RHSUniform = RHSUniform && (MaskVal - LHSVWidth == i);
        }
      }
    }

    // Try to transform a shuffle with a constant vector, where only a single
    // element of that constant vector is used, into a single insertelement
    // instruction.
    // shufflevector V, C, <v1, v2, .., ci, .., vm> ->
    // insertelement V, C[ci], ci-n
    if (LHSVWidth == Shuffle->getType()->getNumElements()) {
      Value *Op = nullptr;
      Constant *Value = nullptr;
      unsigned Idx = -1u;

      // Find constant vector with the single element in shuffle (LHS or RHS).
      if (LHSIdx < LHSVWidth && RHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(0))) {
          Op = Shuffle->getOperand(1);
          Value = CV->getOperand(LHSValIdx);
          Idx = LHSIdx;
        }
      }
      if (RHSIdx < LHSVWidth && LHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(1))) {
          Op = Shuffle->getOperand(0);
          Value = CV->getOperand(RHSValIdx);
          Idx = RHSIdx;
        }
      }
      // Found a constant vector with a single used element - convert to
      // insertelement.
      if (Op && Value) {
        Instruction *New = InsertElementInst::Create(
            Op, Value, ConstantInt::get(Type::getInt32Ty(I->getContext()), Idx),
            Shuffle->getName());
        InsertNewInstWith(New, *Shuffle);
        return New;
      }
    }
    if (NewUndefElts) {
      // Add additional discovered undefs.
      SmallVector<Constant*, 16> Elts;
      for (unsigned i = 0; i < VWidth; ++i) {
        if (UndefElts[i])
          Elts.push_back(UndefValue::get(Type::getInt32Ty(I->getContext())));
        else
          Elts.push_back(ConstantInt::get(Type::getInt32Ty(I->getContext()),
                                          Shuffle->getMaskValue(i)));
      }
      I->setOperand(2, ConstantVector::get(Elts));
      MadeChange = true;
    }
    break;
  }
  case Instruction::Select: {
    APInt LeftDemanded(DemandedElts), RightDemanded(DemandedElts);
    if (ConstantVector *CV = dyn_cast<ConstantVector>(I->getOperand(0))) {
      for (unsigned i = 0; i < VWidth; i++) {
        Constant *CElt = CV->getAggregateElement(i);
        // Method isNullValue always returns false when called on a
        // ConstantExpr. If CElt is a ConstantExpr then skip it in order
        // to avoid propagating incorrect information.
        if (isa<ConstantExpr>(CElt))
          continue;
        if (CElt->isNullValue())
          LeftDemanded.clearBit(i);
        else
          RightDemanded.clearBit(i);
      }
    }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), LeftDemanded, UndefElts,
                                      Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(2), RightDemanded,
                                      UndefElts2, Depth + 1);
    if (TmpV) { I->setOperand(2, TmpV); MadeChange = true; }

    // Output elements are undefined if both are undefined.
    UndefElts &= UndefElts2;
    break;
  }
  case Instruction::BitCast: {
    // Vector->vector casts only.
    VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
    if (!VTy) break;
    unsigned InVWidth = VTy->getNumElements();
    APInt InputDemandedElts(InVWidth, 0);
    UndefElts2 = APInt(InVWidth, 0);
    unsigned Ratio;

    if (VWidth == InVWidth) {
      // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
      // elements as are demanded of us.
      Ratio = 1;
      InputDemandedElts = DemandedElts;
    } else if ((VWidth % InVWidth) == 0) {
      // If the number of elements in the output is a multiple of the number of
      // elements in the input then an input element is live if any of the
      // corresponding output elements are live.
      Ratio = VWidth / InVWidth;
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (DemandedElts[OutIdx])
          InputDemandedElts.setBit(OutIdx / Ratio);
    } else if ((InVWidth % VWidth) == 0) {
      // If the number of elements in the input is a multiple of the number of
      // elements in the output then an input element is live if the
      // corresponding output element is live.
      Ratio = InVWidth / VWidth;
      for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
        if (DemandedElts[InIdx / Ratio])
          InputDemandedElts.setBit(InIdx);
    } else {
      // Unsupported so far.
      break;
    }

    // Simplify the input based on the elements we just computed as demanded.
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
                                      UndefElts2, Depth + 1);
    if (TmpV) {
      I->setOperand(0, TmpV);
      MadeChange = true;
    }

    if (VWidth == InVWidth) {
      UndefElts = UndefElts2;
    } else if ((VWidth % InVWidth) == 0) {
      // If the number of elements in the output is a multiple of the number of
      // elements in the input then an output element is undef if the
      // corresponding input element is undef.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (UndefElts2[OutIdx / Ratio])
          UndefElts.setBit(OutIdx);
    } else if ((InVWidth % VWidth) == 0) {
      // If the number of elements in the input is a multiple of the number of
      // elements in the output then an output element is undef if all of the
      // corresponding input elements are undef.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
        APInt SubUndef = UndefElts2.lshr(OutIdx * Ratio).zextOrTrunc(Ratio);
        if (SubUndef.countPopulation() == Ratio)
          UndefElts.setBit(OutIdx);
      }
    } else {
      llvm_unreachable("Unimp");
    }
    break;
  }
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // These ops demand the same elements from both operands. (div/rem are
    // deliberately not handled here: they demand all elements, because
    // simplifying an operand could introduce a divide by zero.)
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
                                      Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
                                      UndefElts2, Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    // Output elements are undefined if both are undefined. Consider things
    // like undef&0. The result is known zero, not undef.
    UndefElts &= UndefElts2;
    break;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
                                      Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
    break;
  case Instruction::Call: {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
    if (!II) break;
    switch (II->getIntrinsicID()) {
    default: break;

    case Intrinsic::x86_xop_vfrcz_ss:
    case Intrinsic::x86_xop_vfrcz_sd:
      // The instructions for these intrinsics are specified to zero the upper
      // bits, not pass them through like other scalar intrinsics. So we
      // shouldn't just use Arg0 if DemandedElts[0] is clear like we do for
      // other intrinsics. Instead we should return a zero vector.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return ConstantAggregateZero::get(II->getType());
      }

      // Only the lower element is used.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // Only the lower element is undefined. The high elements are zero.
      UndefElts = UndefElts[0];
      break;

    // Unary scalar-as-vector operations that work column-wise.
    case Intrinsic::x86_sse_rcp_ss:
    case Intrinsic::x86_sse_rsqrt_ss:
    case Intrinsic::x86_sse_sqrt_ss:
    case Intrinsic::x86_sse2_sqrt_sd:
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }
      // TODO: If only low elt lower SQRT to FSQRT (with rounding/exceptions
      // checks).
      break;
    // Binary scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0. The low element is a function of both
    // operands.
    case Intrinsic::x86_sse_min_ss:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse2_cmp_sd: {
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only the lower element is used for operand 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Lower element is undefined if both lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
      if (!UndefElts2[0])
        UndefElts.clearBit(0);

      break;
    }
    // Binary scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0 and the low element comes from operand 1.
    case Intrinsic::x86_sse41_round_ss:
    case Intrinsic::x86_sse41_round_sd: {
      // Don't use the low element of operand 0.
      APInt DemandedElts2 = DemandedElts;
      DemandedElts2.clearBit(0);
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts2,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only the lower element is used for operand 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Take the high undef elements from operand 0 and take the lower
      // element from operand 1.
      UndefElts.clearBit(0);
      UndefElts |= UndefElts2[0];
      break;
    }
    // Three input scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0 and the low element is a function of all
    // three inputs.
    case Intrinsic::x86_avx512_mask_add_ss_round:
    case Intrinsic::x86_avx512_mask_div_ss_round:
    case Intrinsic::x86_avx512_mask_mul_ss_round:
    case Intrinsic::x86_avx512_mask_sub_ss_round:
    case Intrinsic::x86_avx512_mask_max_ss_round:
    case Intrinsic::x86_avx512_mask_min_ss_round:
    case Intrinsic::x86_avx512_mask_add_sd_round:
    case Intrinsic::x86_avx512_mask_div_sd_round:
    case Intrinsic::x86_avx512_mask_mul_sd_round:
    case Intrinsic::x86_avx512_mask_sub_sd_round:
    case Intrinsic::x86_avx512_mask_max_sd_round:
    case Intrinsic::x86_avx512_mask_min_sd_round:
    case Intrinsic::x86_fma_vfmadd_ss:
    case Intrinsic::x86_fma_vfmsub_ss:
    case Intrinsic::x86_fma_vfnmadd_ss:
    case Intrinsic::x86_fma_vfnmsub_ss:
    case Intrinsic::x86_fma_vfmadd_sd:
    case Intrinsic::x86_fma_vfmsub_sd:
    case Intrinsic::x86_fma_vfnmadd_sd:
    case Intrinsic::x86_fma_vfnmsub_sd:
    case Intrinsic::x86_avx512_mask_vfmadd_ss:
    case Intrinsic::x86_avx512_mask_vfmadd_sd:
    case Intrinsic::x86_avx512_maskz_vfmadd_ss:
    case Intrinsic::x86_avx512_maskz_vfmadd_sd:
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only the lower element is used for operands 1 and 2.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(2), DemandedElts,
                                        UndefElts3, Depth + 1);
      if (TmpV) { II->setArgOperand(2, TmpV); MadeChange = true; }

      // Lower element is undefined if all three lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
      if (!UndefElts2[0] || !UndefElts3[0])
        UndefElts.clearBit(0);

      break;
    case Intrinsic::x86_avx512_mask3_vfmadd_ss:
    case Intrinsic::x86_avx512_mask3_vfmadd_sd:
    case Intrinsic::x86_avx512_mask3_vfmsub_ss:
    case Intrinsic::x86_avx512_mask3_vfmsub_sd:
    case Intrinsic::x86_avx512_mask3_vfnmsub_ss:
    case Intrinsic::x86_avx512_mask3_vfnmsub_sd:
      // These intrinsics get the passthru bits from operand 2.
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(2), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(2, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg2.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(2);
      }

      // Only the lower element is used for operands 0 and 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts3, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Lower element is undefined if all three lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
      if (!UndefElts2[0] || !UndefElts3[0])
        UndefElts.clearBit(0);

      break;
    case Intrinsic::x86_sse2_pmulu_dq:
    case Intrinsic::x86_sse41_pmuldq:
    case Intrinsic::x86_avx2_pmul_dq:
    case Intrinsic::x86_avx2_pmulu_dq:
    case Intrinsic::x86_avx512_pmul_dq_512:
    case Intrinsic::x86_avx512_pmulu_dq_512: {
      Value *Op0 = II->getArgOperand(0);
      Value *Op1 = II->getArgOperand(1);
      unsigned InnerVWidth = Op0->getType()->getVectorNumElements();
      assert((VWidth * 2) == InnerVWidth && "Unexpected input size");
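      // Each 64-bit output element is the product of the even-numbered
      // 32-bit input elements of each operand (the odd elements are ignored),
      // so output element i demands input element 2*i.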
      APInt InnerDemandedElts(InnerVWidth, 0);
      for (unsigned i = 0; i != VWidth; ++i)
        if (DemandedElts[i])
          InnerDemandedElts.setBit(i * 2);

      UndefElts2 = APInt(InnerVWidth, 0);
      TmpV = SimplifyDemandedVectorElts(Op0, InnerDemandedElts, UndefElts2,
                                        Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      UndefElts3 = APInt(InnerVWidth, 0);
      TmpV = SimplifyDemandedVectorElts(Op1, InnerDemandedElts, UndefElts3,
                                        Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      break;
    }
    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packusdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packusdw_512:
    case Intrinsic::x86_avx512_packuswb_512: {
      auto *Ty0 = II->getArgOperand(0)->getType();
      unsigned InnerVWidth = Ty0->getVectorNumElements();
      assert(VWidth == (InnerVWidth * 2) && "Unexpected input size");

      unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
      unsigned VWidthPerLane = VWidth / NumLanes;
      unsigned InnerVWidthPerLane = InnerVWidth / NumLanes;

      // Per lane, pack the elements of the first input and then the second.
      // e.g.
      // v8i16 PACK(v4i32 X, v4i32 Y) - (X[0..3],Y[0..3])
      // v32i8 PACK(v16i16 X, v16i16 Y) - (X[0..7],Y[0..7]),(X[8..15],Y[8..15])
      for (int OpNum = 0; OpNum != 2; ++OpNum) {
        APInt OpDemandedElts(InnerVWidth, 0);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          unsigned LaneIdx = Lane * VWidthPerLane;
          for (unsigned Elt = 0; Elt != InnerVWidthPerLane; ++Elt) {
            unsigned Idx = LaneIdx + Elt + InnerVWidthPerLane * OpNum;
            if (DemandedElts[Idx])
              OpDemandedElts.setBit((Lane * InnerVWidthPerLane) + Elt);
          }
        }

        // Demand elements from the operand.
        auto *Op = II->getArgOperand(OpNum);
        APInt OpUndefElts(InnerVWidth, 0);
        TmpV = SimplifyDemandedVectorElts(Op, OpDemandedElts, OpUndefElts,
                                          Depth + 1);
        if (TmpV) {
          II->setArgOperand(OpNum, TmpV);
          MadeChange = true;
        }

        // Pack the operand's UNDEF elements, one lane at a time.
        OpUndefElts = OpUndefElts.zext(VWidth);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
          LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
          LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);
          UndefElts |= LaneElts;
        }
      }
      break;
    }
    // PSHUFB
    case Intrinsic::x86_ssse3_pshuf_b_128:
    case Intrinsic::x86_avx2_pshuf_b:
    case Intrinsic::x86_avx512_pshuf_b_512:
    // PERMILVAR
    case Intrinsic::x86_avx_vpermilvar_ps:
    case Intrinsic::x86_avx_vpermilvar_ps_256:
    case Intrinsic::x86_avx512_vpermilvar_ps_512:
    case Intrinsic::x86_avx_vpermilvar_pd:
    case Intrinsic::x86_avx_vpermilvar_pd_256:
    case Intrinsic::x86_avx512_vpermilvar_pd_512:
    // PERMV
    case Intrinsic::x86_avx2_permd:
    case Intrinsic::x86_avx2_permps: {
      Value *Op1 = II->getArgOperand(1);
      TmpV = SimplifyDemandedVectorElts(Op1, DemandedElts, UndefElts,
                                        Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
      break;
    }

    // SSE4A instructions leave the upper 64-bits of the 128-bit result
    // in an undefined state.
    case Intrinsic::x86_sse4a_extrq:
    case Intrinsic::x86_sse4a_extrqi:
    case Intrinsic::x86_sse4a_insertq:
    case Intrinsic::x86_sse4a_insertqi:
      UndefElts.setHighBits(VWidth / 2);
      break;
    case Intrinsic::amdgcn_buffer_load:
    case Intrinsic::amdgcn_buffer_load_format:
    case Intrinsic::amdgcn_image_sample:
    case Intrinsic::amdgcn_image_sample_cl:
    case Intrinsic::amdgcn_image_sample_d:
    case Intrinsic::amdgcn_image_sample_d_cl:
    case Intrinsic::amdgcn_image_sample_l:
    case Intrinsic::amdgcn_image_sample_b:
    case Intrinsic::amdgcn_image_sample_b_cl:
    case Intrinsic::amdgcn_image_sample_lz:
    case Intrinsic::amdgcn_image_sample_cd:
    case Intrinsic::amdgcn_image_sample_cd_cl:

    case Intrinsic::amdgcn_image_sample_c:
    case Intrinsic::amdgcn_image_sample_c_cl:
    case Intrinsic::amdgcn_image_sample_c_d:
    case Intrinsic::amdgcn_image_sample_c_d_cl:
    case Intrinsic::amdgcn_image_sample_c_l:
    case Intrinsic::amdgcn_image_sample_c_b:
    case Intrinsic::amdgcn_image_sample_c_b_cl:
    case Intrinsic::amdgcn_image_sample_c_lz:
    case Intrinsic::amdgcn_image_sample_c_cd:
    case Intrinsic::amdgcn_image_sample_c_cd_cl:

    case Intrinsic::amdgcn_image_sample_o:
    case Intrinsic::amdgcn_image_sample_cl_o:
    case Intrinsic::amdgcn_image_sample_d_o:
    case Intrinsic::amdgcn_image_sample_d_cl_o:
    case Intrinsic::amdgcn_image_sample_l_o:
    case Intrinsic::amdgcn_image_sample_b_o:
    case Intrinsic::amdgcn_image_sample_b_cl_o:
    case Intrinsic::amdgcn_image_sample_lz_o:
    case Intrinsic::amdgcn_image_sample_cd_o:
    case Intrinsic::amdgcn_image_sample_cd_cl_o:

    case Intrinsic::amdgcn_image_sample_c_o:
    case Intrinsic::amdgcn_image_sample_c_cl_o:
    case Intrinsic::amdgcn_image_sample_c_d_o:
    case Intrinsic::amdgcn_image_sample_c_d_cl_o:
    case Intrinsic::amdgcn_image_sample_c_l_o:
    case Intrinsic::amdgcn_image_sample_c_b_o:
    case Intrinsic::amdgcn_image_sample_c_b_cl_o:
    case Intrinsic::amdgcn_image_sample_c_lz_o:
    case Intrinsic::amdgcn_image_sample_c_cd_o:
    case Intrinsic::amdgcn_image_sample_c_cd_cl_o:

    case Intrinsic::amdgcn_image_getlod: {
      if (VWidth == 1 || !DemandedElts.isMask())
        return nullptr;

      // TODO: Handle 3 vectors when supported in code gen.
      unsigned NewNumElts = PowerOf2Ceil(DemandedElts.countTrailingOnes());
      if (NewNumElts == VWidth)
        return nullptr;

      Module *M = II->getParent()->getParent()->getParent();
      Type *EltTy = V->getType()->getVectorElementType();

      Type *NewTy = (NewNumElts == 1) ? EltTy :
        VectorType::get(EltTy, NewNumElts);

      auto IID = II->getIntrinsicID();

      bool IsBuffer = IID == Intrinsic::amdgcn_buffer_load ||
                      IID == Intrinsic::amdgcn_buffer_load_format;

      Function *NewIntrin = IsBuffer ?
        Intrinsic::getDeclaration(M, IID, NewTy) :
        // Samplers have 3 mangled types.
        Intrinsic::getDeclaration(M, IID,
                                  { NewTy, II->getArgOperand(0)->getType(),
                                      II->getArgOperand(1)->getType()});

      SmallVector<Value *, 5> Args;
      for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I)
        Args.push_back(II->getArgOperand(I));

      IRBuilderBase::InsertPointGuard Guard(*Builder);
      Builder->SetInsertPoint(II);

      CallInst *NewCall = Builder->CreateCall(NewIntrin, Args);
      NewCall->takeName(II);
      NewCall->copyMetadata(*II);
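      // For the image/sample forms, also narrow the dmask operand: it selects
      // which channels the instruction writes, so keep only the first
      // NewNumElts set bits.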
      if (!IsBuffer) {
        ConstantInt *DMask = dyn_cast<ConstantInt>(NewCall->getArgOperand(3));
        if (DMask) {
          unsigned DMaskVal = DMask->getZExtValue() & 0xf;

          unsigned PopCnt = 0;
          unsigned NewDMask = 0;
          for (unsigned I = 0; I < 4; ++I) {
            const unsigned Bit = 1 << I;
            if (!!(DMaskVal & Bit)) {
              if (++PopCnt > NewNumElts)
                break;

              NewDMask |= Bit;
            }
          }

          NewCall->setArgOperand(3, ConstantInt::get(DMask->getType(),
                                                     NewDMask));
        }
      }

      if (NewNumElts == 1) {
        return Builder->CreateInsertElement(UndefValue::get(V->getType()),
                                            NewCall, static_cast<uint64_t>(0));
      }

      SmallVector<uint32_t, 8> EltMask;
      for (unsigned I = 0; I < VWidth; ++I)
        EltMask.push_back(I);

      Value *Shuffle = Builder->CreateShuffleVector(
          NewCall, UndefValue::get(NewTy), EltMask);

      return Shuffle;
    }
    }
    break;
  }
  }

  return MadeChange ? I : nullptr;
}