1 //===- InstCombineCompares.cpp --------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the visitICmp and visitFCmp functions.
12 //===----------------------------------------------------------------------===//
14 #include "InstCombineInternal.h"
15 #include "llvm/ADT/APSInt.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/Analysis/ConstantFolding.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/MemoryBuiltins.h"
21 #include "llvm/Analysis/TargetLibraryInfo.h"
22 #include "llvm/Analysis/VectorUtils.h"
23 #include "llvm/IR/ConstantRange.h"
24 #include "llvm/IR/DataLayout.h"
25 #include "llvm/IR/GetElementPtrTypeIterator.h"
26 #include "llvm/IR/IntrinsicInst.h"
27 #include "llvm/IR/PatternMatch.h"
28 #include "llvm/Support/Debug.h"
31 using namespace PatternMatch;
33 #define DEBUG_TYPE "instcombine"
35 // How many times is a select replaced by one of its operands?
36 STATISTIC(NumSel, "Number of select opts");
39 static ConstantInt *extractElement(Constant *V, Constant *Idx) {
40 return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
43 static bool hasAddOverflow(ConstantInt *Result,
44 ConstantInt *In1, ConstantInt *In2,
47 return Result->getValue().ult(In1->getValue());
49 if (In2->isNegative())
50 return Result->getValue().sgt(In1->getValue());
51 return Result->getValue().slt(In1->getValue());
54 /// Compute Result = In1+In2, returning true if the result overflowed for this
56 static bool addWithOverflow(Constant *&Result, Constant *In1,
57 Constant *In2, bool IsSigned = false) {
58 Result = ConstantExpr::getAdd(In1, In2);
60 if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
61 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
62 Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
63 if (hasAddOverflow(extractElement(Result, Idx),
64 extractElement(In1, Idx),
65 extractElement(In2, Idx),
72 return hasAddOverflow(cast<ConstantInt>(Result),
73 cast<ConstantInt>(In1), cast<ConstantInt>(In2),
77 static bool hasSubOverflow(ConstantInt *Result,
78 ConstantInt *In1, ConstantInt *In2,
81 return Result->getValue().ugt(In1->getValue());
83 if (In2->isNegative())
84 return Result->getValue().slt(In1->getValue());
86 return Result->getValue().sgt(In1->getValue());
89 /// Compute Result = In1-In2, returning true if the result overflowed for this
91 static bool subWithOverflow(Constant *&Result, Constant *In1,
92 Constant *In2, bool IsSigned = false) {
93 Result = ConstantExpr::getSub(In1, In2);
95 if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
96 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
97 Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
98 if (hasSubOverflow(extractElement(Result, Idx),
99 extractElement(In1, Idx),
100 extractElement(In2, Idx),
107 return hasSubOverflow(cast<ConstantInt>(Result),
108 cast<ConstantInt>(In1), cast<ConstantInt>(In2),
112 /// Given an icmp instruction, return true if any use of this comparison is a
113 /// branch on sign bit comparison.
114 static bool isBranchOnSignBitCheck(ICmpInst &I, bool isSignBit) {
115 for (auto *U : I.users())
116 if (isa<BranchInst>(U))
121 /// Given an exploded icmp instruction, return true if the comparison only
122 /// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if the
123 /// result of the comparison is true when the input value is signed.
124 static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
125 bool &TrueIfSigned) {
127 case ICmpInst::ICMP_SLT: // True if LHS s< 0
130 case ICmpInst::ICMP_SLE: // True if LHS s<= RHS and RHS == -1
132 return RHS.isAllOnesValue();
133 case ICmpInst::ICMP_SGT: // True if LHS s> -1
134 TrueIfSigned = false;
135 return RHS.isAllOnesValue();
136 case ICmpInst::ICMP_UGT:
137 // True if LHS u> RHS and RHS == high-bit-mask - 1
139 return RHS.isMaxSignedValue();
140 case ICmpInst::ICMP_UGE:
141 // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
143 return RHS.isSignBit();
149 /// Returns true if the exploded icmp can be expressed as a signed comparison
150 /// to zero and updates the predicate accordingly.
151 /// The signedness of the comparison is preserved.
152 /// TODO: Refactor with decomposeBitTestICmp()?
153 static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
154 if (!ICmpInst::isSigned(Pred))
158 return ICmpInst::isRelational(Pred);
161 if (Pred == ICmpInst::ICMP_SLT) {
162 Pred = ICmpInst::ICMP_SLE;
165 } else if (C.isAllOnesValue()) {
166 if (Pred == ICmpInst::ICMP_SGT) {
167 Pred = ICmpInst::ICMP_SGE;
175 /// Given a signed integer type and a set of known zero and one bits, compute
176 /// the maximum and minimum values that could have the specified known zero and
177 /// known one bits, returning them in Min/Max.
178 static void computeSignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
179 const APInt &KnownOne,
180 APInt &Min, APInt &Max) {
181 assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
182 KnownZero.getBitWidth() == Min.getBitWidth() &&
183 KnownZero.getBitWidth() == Max.getBitWidth() &&
184 "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
185 APInt UnknownBits = ~(KnownZero|KnownOne);
187 // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
188 // bit if it is unknown.
190 Max = KnownOne|UnknownBits;
192 if (UnknownBits.isNegative()) { // Sign bit is unknown
193 Min.setBit(Min.getBitWidth()-1);
194 Max.clearBit(Max.getBitWidth()-1);
198 /// Given an unsigned integer type and a set of known zero and one bits, compute
199 /// the maximum and minimum values that could have the specified known zero and
200 /// known one bits, returning them in Min/Max.
201 static void computeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
202 const APInt &KnownOne,
203 APInt &Min, APInt &Max) {
204 assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
205 KnownZero.getBitWidth() == Min.getBitWidth() &&
206 KnownZero.getBitWidth() == Max.getBitWidth() &&
207 "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
208 APInt UnknownBits = ~(KnownZero|KnownOne);
210 // The minimum value is when the unknown bits are all zeros.
212 // The maximum value is when the unknown bits are all ones.
213 Max = KnownOne|UnknownBits;
216 /// This is called when we see this pattern:
217 /// cmp pred (load (gep GV, ...)), cmpcst
218 /// where GV is a global variable with a constant initializer. Try to simplify
219 /// this into some simple computation that does not need the load. For example
220 /// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
222 /// If AndCst is non-null, then the loaded value is masked with that constant
223 /// before doing the comparison. This handles cases like "A[i]&4 == 0".
224 Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
227 ConstantInt *AndCst) {
228 Constant *Init = GV->getInitializer();
229 if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
232 uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
233 // Don't blow up on huge arrays.
234 if (ArrayElementCount > MaxArraySizeForCombine)
237 // There are many forms of this optimization we can handle, for now, just do
238 // the simple index into a single-dimensional array.
240 // Require: GEP GV, 0, i {{, constant indices}}
241 if (GEP->getNumOperands() < 3 ||
242 !isa<ConstantInt>(GEP->getOperand(1)) ||
243 !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
244 isa<Constant>(GEP->getOperand(2)))
247 // Check that indices after the variable are constants and in-range for the
248 // type they index. Collect the indices. This is typically for arrays of
250 SmallVector<unsigned, 4> LaterIndices;
252 Type *EltTy = Init->getType()->getArrayElementType();
253 for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
254 ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
255 if (!Idx) return nullptr; // Variable index.
257 uint64_t IdxVal = Idx->getZExtValue();
258 if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.
260 if (StructType *STy = dyn_cast<StructType>(EltTy))
261 EltTy = STy->getElementType(IdxVal);
262 else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
263 if (IdxVal >= ATy->getNumElements()) return nullptr;
264 EltTy = ATy->getElementType();
266 return nullptr; // Unknown type.
269 LaterIndices.push_back(IdxVal);
272 enum { Overdefined = -3, Undefined = -2 };
274 // Variables for our state machines.
276 // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
277 // "i == 47 | i == 87", where 47 is the first index the condition is true for,
278 // and 87 is the second (and last) index. FirstTrueElement is -2 when
279 // undefined, otherwise set to the first true element. SecondTrueElement is
280 // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
281 int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
283 // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
284 // form "i != 47 & i != 87". Same state transitions as for true elements.
285 int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
287 /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
288 /// define a state machine that triggers for ranges of values that the index
289 /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
290 /// This is -2 when undefined, -3 when overdefined, and otherwise the last
291 /// index in the range (inclusive). We use -2 for undefined here because we
292 /// use relative comparisons and don't want 0-1 to match -1.
293 int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
295 // MagicBitvector - This is a magic bitvector where we set a bit if the
296 // comparison is true for element 'i'. If there are 64 elements or less in
297 // the array, this will fully represent all the comparison results.
298 uint64_t MagicBitvector = 0;
300 // Scan the array and see if one of our patterns matches.
301 Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
302 for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
303 Constant *Elt = Init->getAggregateElement(i);
304 if (!Elt) return nullptr;
306 // If this is indexing an array of structures, get the structure element.
307 if (!LaterIndices.empty())
308 Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);
310 // If the element is masked, handle it.
311 if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);
313 // Find out if the comparison would be true or false for the i'th element.
314 Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
315 CompareRHS, DL, &TLI);
316 // If the result is undef for this element, ignore it.
317 if (isa<UndefValue>(C)) {
318 // Extend range state machines to cover this element in case there is an
319 // undef in the middle of the range.
320 if (TrueRangeEnd == (int)i-1)
322 if (FalseRangeEnd == (int)i-1)
327 // If we can't compute the result for any of the elements, we have to give
328 // up evaluating the entire conditional.
329 if (!isa<ConstantInt>(C)) return nullptr;
331 // Otherwise, we know if the comparison is true or false for this element,
332 // update our state machines.
333 bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();
335 // State machine for single/double/range index comparison.
337 // Update the TrueElement state machine.
338 if (FirstTrueElement == Undefined)
339 FirstTrueElement = TrueRangeEnd = i; // First true element.
341 // Update double-compare state machine.
342 if (SecondTrueElement == Undefined)
343 SecondTrueElement = i;
345 SecondTrueElement = Overdefined;
347 // Update range state machine.
348 if (TrueRangeEnd == (int)i-1)
351 TrueRangeEnd = Overdefined;
354 // Update the FalseElement state machine.
355 if (FirstFalseElement == Undefined)
356 FirstFalseElement = FalseRangeEnd = i; // First false element.
358 // Update double-compare state machine.
359 if (SecondFalseElement == Undefined)
360 SecondFalseElement = i;
362 SecondFalseElement = Overdefined;
364 // Update range state machine.
365 if (FalseRangeEnd == (int)i-1)
368 FalseRangeEnd = Overdefined;
372 // If this element is in range, update our magic bitvector.
373 if (i < 64 && IsTrueForElt)
374 MagicBitvector |= 1ULL << i;
376 // If all of our states become overdefined, bail out early. Since the
377 // predicate is expensive, only check it every 8 elements. This is only
378 // really useful for really huge arrays.
379 if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
380 SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
381 FalseRangeEnd == Overdefined)
385 // Now that we've scanned the entire array, emit our new comparison(s). We
386 // order the state machines in complexity of the generated code.
387 Value *Idx = GEP->getOperand(2);
389 // If the index is larger than the pointer size of the target, truncate the
390 // index down like the GEP would do implicitly. We don't have to do this for
391 // an inbounds GEP because the index can't be out of range.
392 if (!GEP->isInBounds()) {
393 Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
394 unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
395 if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
396 Idx = Builder->CreateTrunc(Idx, IntPtrTy);
399 // If the comparison is only true for one or two elements, emit direct
401 if (SecondTrueElement != Overdefined) {
402 // None true -> false.
403 if (FirstTrueElement == Undefined)
404 return replaceInstUsesWith(ICI, Builder->getFalse());
406 Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
408 // True for one element -> 'i == 47'.
409 if (SecondTrueElement == Undefined)
410 return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
412 // True for two elements -> 'i == 47 | i == 72'.
413 Value *C1 = Builder->CreateICmpEQ(Idx, FirstTrueIdx);
414 Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
415 Value *C2 = Builder->CreateICmpEQ(Idx, SecondTrueIdx);
416 return BinaryOperator::CreateOr(C1, C2);
419 // If the comparison is only false for one or two elements, emit direct
421 if (SecondFalseElement != Overdefined) {
422 // None false -> true.
423 if (FirstFalseElement == Undefined)
424 return replaceInstUsesWith(ICI, Builder->getTrue());
426 Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
428 // False for one element -> 'i != 47'.
429 if (SecondFalseElement == Undefined)
430 return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
432 // False for two elements -> 'i != 47 & i != 72'.
433 Value *C1 = Builder->CreateICmpNE(Idx, FirstFalseIdx);
434 Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
435 Value *C2 = Builder->CreateICmpNE(Idx, SecondFalseIdx);
436 return BinaryOperator::CreateAnd(C1, C2);
439 // If the comparison can be replaced with a range comparison for the elements
440 // where it is true, emit the range check.
441 if (TrueRangeEnd != Overdefined) {
442 assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
444 // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
445 if (FirstTrueElement) {
446 Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
447 Idx = Builder->CreateAdd(Idx, Offs);
450 Value *End = ConstantInt::get(Idx->getType(),
451 TrueRangeEnd-FirstTrueElement+1);
452 return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
455 // False range check.
456 if (FalseRangeEnd != Overdefined) {
457 assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
458 // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
459 if (FirstFalseElement) {
460 Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
461 Idx = Builder->CreateAdd(Idx, Offs);
464 Value *End = ConstantInt::get(Idx->getType(),
465 FalseRangeEnd-FirstFalseElement);
466 return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
469 // If a magic bitvector captures the entire comparison state
470 // of this load, replace it with computation that does:
471 // ((magic_cst >> i) & 1) != 0
475 // Look for an appropriate type:
476 // - The type of Idx if the magic fits
477 // - The smallest fitting legal type if we have a DataLayout
479 if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
482 Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
485 Value *V = Builder->CreateIntCast(Idx, Ty, false);
486 V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
487 V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V);
488 return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
495 /// Return a value that can be used to compare the *offset* implied by a GEP to
496 /// zero. For example, if we have &A[i], we want to return 'i' for
497 /// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
498 /// are involved. The above expression would also be legal to codegen as
499 /// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
500 /// This latter form is less amenable to optimization though, and we are allowed
501 /// to generate the first by knowing that pointer arithmetic doesn't overflow.
503 /// If we can't emit an optimized form for this expression, this returns null.
505 static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
506 const DataLayout &DL) {
507 gep_type_iterator GTI = gep_type_begin(GEP);
509 // Check to see if this gep only has a single variable index. If so, and if
510 // any constant indices are a multiple of its scale, then we can compute this
511 // in terms of the scale of the variable index. For example, if the GEP
512 // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
513 // because the expression will cross zero at the same point.
514 unsigned i, e = GEP->getNumOperands();
516 for (i = 1; i != e; ++i, ++GTI) {
517 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
518 // Compute the aggregate offset of constant indices.
519 if (CI->isZero()) continue;
521 // Handle a struct index, which adds its field offset to the pointer.
522 if (StructType *STy = GTI.getStructTypeOrNull()) {
523 Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
525 uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
526 Offset += Size*CI->getSExtValue();
529 // Found our variable index.
534 // If there are no variable indices, we must have a constant offset, just
535 // evaluate it the general way.
536 if (i == e) return nullptr;
538 Value *VariableIdx = GEP->getOperand(i);
539 // Determine the scale factor of the variable element. For example, this is
540 // 4 if the variable index is into an array of i32.
541 uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());
543 // Verify that there are no other variable indices. If so, emit the hard way.
544 for (++i, ++GTI; i != e; ++i, ++GTI) {
545 ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
546 if (!CI) return nullptr;
548 // Compute the aggregate offset of constant indices.
549 if (CI->isZero()) continue;
551 // Handle a struct index, which adds its field offset to the pointer.
552 if (StructType *STy = GTI.getStructTypeOrNull()) {
553 Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
555 uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
556 Offset += Size*CI->getSExtValue();
560 // Okay, we know we have a single variable index, which must be a
561 // pointer/array/vector index. If there is no offset, life is simple, return
563 Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
564 unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
566 // Cast to intptrty in case a truncation occurs. If an extension is needed,
567 // we don't need to bother extending: the extension won't affect where the
568 // computation crosses zero.
569 if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
570 VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy);
575 // Otherwise, there is an index. The computation we will do will be modulo
576 // the pointer size, so get it.
577 uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
579 Offset &= PtrSizeMask;
580 VariableScale &= PtrSizeMask;
582 // To do this transformation, any constant index must be a multiple of the
583 // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
584 // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
585 // multiple of the variable scale.
586 int64_t NewOffs = Offset / (int64_t)VariableScale;
587 if (Offset != NewOffs*(int64_t)VariableScale)
590 // Okay, we can do this evaluation. Start by converting the index to intptr.
591 if (VariableIdx->getType() != IntPtrTy)
592 VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy,
594 Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
595 return IC.Builder->CreateAdd(VariableIdx, OffsetVal, "offset");
598 /// Returns true if we can rewrite Start as a GEP with pointer Base
599 /// and some integer offset. The nodes that need to be re-written
600 /// for this transformation will be added to Explored.
601 static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
602 const DataLayout &DL,
603 SetVector<Value *> &Explored) {
604 SmallVector<Value *, 16> WorkList(1, Start);
605 Explored.insert(Base);
607 // The following traversal gives us an order which can be used
608 // when doing the final transformation. Since in the final
609 // transformation we create the PHI replacement instructions first,
610 // we don't have to get them in any particular order.
612 // However, for other instructions we will have to traverse the
613 // operands of an instruction first, which means that we have to
614 // do a post-order traversal.
615 while (!WorkList.empty()) {
616 SetVector<PHINode *> PHIs;
618 while (!WorkList.empty()) {
619 if (Explored.size() >= 100)
622 Value *V = WorkList.back();
624 if (Explored.count(V) != 0) {
629 if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
630 !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
631 // We've found some value that we can't explore which is different from
632 // the base. Therefore we can't do this transformation.
635 if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
636 auto *CI = dyn_cast<CastInst>(V);
637 if (!CI->isNoopCast(DL))
640 if (Explored.count(CI->getOperand(0)) == 0)
641 WorkList.push_back(CI->getOperand(0));
644 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
645 // We're limiting the GEP to having one index. This will preserve
646 // the original pointer type. We could handle more cases in the
648 if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
649 GEP->getType() != Start->getType())
652 if (Explored.count(GEP->getOperand(0)) == 0)
653 WorkList.push_back(GEP->getOperand(0));
656 if (WorkList.back() == V) {
658 // We've finished visiting this node, mark it as such.
662 if (auto *PN = dyn_cast<PHINode>(V)) {
663 // We cannot transform PHIs on unsplittable basic blocks.
664 if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
671 // Explore the PHI nodes further.
672 for (auto *PN : PHIs)
673 for (Value *Op : PN->incoming_values())
674 if (Explored.count(Op) == 0)
675 WorkList.push_back(Op);
678 // Make sure that we can do this. Since we can't insert GEPs in a basic
679 // block before a PHI node, we can't easily do this transformation if
680 // we have PHI node users of transformed instructions.
681 for (Value *Val : Explored) {
682 for (Value *Use : Val->uses()) {
684 auto *PHI = dyn_cast<PHINode>(Use);
685 auto *Inst = dyn_cast<Instruction>(Val);
687 if (Inst == Base || Inst == PHI || !Inst || !PHI ||
688 Explored.count(PHI) == 0)
691 if (PHI->getParent() == Inst->getParent())
698 // Sets the appropriate insert point on Builder where we can add
699 // a replacement Instruction for V (if that is possible).
700 static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
701 bool Before = true) {
702 if (auto *PHI = dyn_cast<PHINode>(V)) {
703 Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
706 if (auto *I = dyn_cast<Instruction>(V)) {
708 I = &*std::next(I->getIterator());
709 Builder.SetInsertPoint(I);
712 if (auto *A = dyn_cast<Argument>(V)) {
713 // Set the insertion point in the entry block.
714 BasicBlock &Entry = A->getParent()->getEntryBlock();
715 Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
718 // Otherwise, this is a constant and we don't need to set a new
720 assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
723 /// Returns a re-written value of Start as an indexed GEP using Base as a
725 static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
726 const DataLayout &DL,
727 SetVector<Value *> &Explored) {
728 // Perform all the substitutions. This is a bit tricky because we can
729 // have cycles in our use-def chains.
730 // 1. Create the PHI nodes without any incoming values.
731 // 2. Create all the other values.
732 // 3. Add the edges for the PHI nodes.
733 // 4. Emit GEPs to get the original pointers.
734 // 5. Remove the original instructions.
735 Type *IndexType = IntegerType::get(
736 Base->getContext(), DL.getPointerTypeSizeInBits(Start->getType()));
738 DenseMap<Value *, Value *> NewInsts;
739 NewInsts[Base] = ConstantInt::getNullValue(IndexType);
741 // Create the new PHI nodes, without adding any incoming values.
742 for (Value *Val : Explored) {
745 // Create empty phi nodes. This avoids cyclic dependencies when creating
746 // the remaining instructions.
747 if (auto *PHI = dyn_cast<PHINode>(Val))
748 NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
749 PHI->getName() + ".idx", PHI);
751 IRBuilder<> Builder(Base->getContext());
753 // Create all the other instructions.
754 for (Value *Val : Explored) {
756 if (NewInsts.find(Val) != NewInsts.end())
759 if (auto *CI = dyn_cast<CastInst>(Val)) {
760 NewInsts[CI] = NewInsts[CI->getOperand(0)];
763 if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
764 Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
765 : GEP->getOperand(1);
766 setInsertionPoint(Builder, GEP);
767 // Indices might need to be sign extended. GEPs will magically do
768 // this, but we need to do it ourselves here.
769 if (Index->getType()->getScalarSizeInBits() !=
770 NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
771 Index = Builder.CreateSExtOrTrunc(
772 Index, NewInsts[GEP->getOperand(0)]->getType(),
773 GEP->getOperand(0)->getName() + ".sext");
776 auto *Op = NewInsts[GEP->getOperand(0)];
777 if (isa<ConstantInt>(Op) && dyn_cast<ConstantInt>(Op)->isZero())
778 NewInsts[GEP] = Index;
780 NewInsts[GEP] = Builder.CreateNSWAdd(
781 Op, Index, GEP->getOperand(0)->getName() + ".add");
784 if (isa<PHINode>(Val))
787 llvm_unreachable("Unexpected instruction type");
790 // Add the incoming values to the PHI nodes.
791 for (Value *Val : Explored) {
794 // All the instructions have been created, we can now add edges to the
796 if (auto *PHI = dyn_cast<PHINode>(Val)) {
797 PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
798 for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
799 Value *NewIncoming = PHI->getIncomingValue(I);
801 if (NewInsts.find(NewIncoming) != NewInsts.end())
802 NewIncoming = NewInsts[NewIncoming];
804 NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
809 for (Value *Val : Explored) {
813 // Depending on the type, for external users we have to emit
814 // a GEP or a GEP + ptrtoint.
815 setInsertionPoint(Builder, Val, false);
817 // If required, create an inttoptr instruction for Base.
818 Value *NewBase = Base;
819 if (!Base->getType()->isPointerTy())
820 NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
821 Start->getName() + "to.ptr");
823 Value *GEP = Builder.CreateInBoundsGEP(
824 Start->getType()->getPointerElementType(), NewBase,
825 makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");
827 if (!Val->getType()->isPointerTy()) {
828 Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
829 Val->getName() + ".conv");
832 Val->replaceAllUsesWith(GEP);
835 return NewInsts[Start];
838 /// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
839 /// the input Value as a constant indexed GEP. Returns a pair containing
840 /// the GEPs Pointer and Index.
841 static std::pair<Value *, Value *>
842 getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
843 Type *IndexType = IntegerType::get(V->getContext(),
844 DL.getPointerTypeSizeInBits(V->getType()));
846 Constant *Index = ConstantInt::getNullValue(IndexType);
848 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
849 // We accept only inbouds GEPs here to exclude the possibility of
851 if (!GEP->isInBounds())
853 if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
854 GEP->getType() == V->getType()) {
855 V = GEP->getOperand(0);
856 Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
857 Index = ConstantExpr::getAdd(
858 Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType));
863 if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
864 if (!CI->isNoopCast(DL))
866 V = CI->getOperand(0);
869 if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
870 if (!CI->isNoopCast(DL))
872 V = CI->getOperand(0);
880 /// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
881 /// We can look through PHIs, GEPs and casts in order to determine a common base
882 /// between GEPLHS and RHS.
883 static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
884 ICmpInst::Predicate Cond,
885 const DataLayout &DL) {
886 if (!GEPLHS->hasAllConstantIndices())
889 // Make sure the pointers have the same type.
890 if (GEPLHS->getType() != RHS->getType())
893 Value *PtrBase, *Index;
894 std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);
896 // The set of nodes that will take part in this transformation.
897 SetVector<Value *> Nodes;
899 if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
902 // We know we can re-write this as
903 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)
904 // Since we've only looked through inbouds GEPs we know that we
905 // can't have overflow on either side. We can therefore re-write
907 // OFFSET1 cmp OFFSET2
908 Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);
910 // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
911 // GEP having PtrBase as the pointer base, and has returned in NewRHS the
912 // offset. Since Index is the offset of LHS to the base pointer, we will now
913 // compare the offsets instead of comparing the pointers.
914 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
917 /// Fold comparisons between a GEP instruction and something else. At this point
918 /// we know that the GEP is on the LHS of the comparison.
919 Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
920 ICmpInst::Predicate Cond,
922 // Don't transform signed compares of GEPs into index compares. Even if the
923 // GEP is inbounds, the final add of the base pointer can have signed overflow
924 // and would change the result of the icmp.
925 // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
926 // the maximum signed value for the pointer type.
927 if (ICmpInst::isSigned(Cond))
930 // Look through bitcasts and addrspacecasts. We do not however want to remove
932 if (!isa<GetElementPtrInst>(RHS))
933 RHS = RHS->stripPointerCasts();
935 Value *PtrBase = GEPLHS->getOperand(0);
936 if (PtrBase == RHS && GEPLHS->isInBounds()) {
937 // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
938 // This transformation (ignoring the base and scales) is valid because we
939 // know pointers can't overflow since the gep is inbounds. See if we can
940 // output an optimized form.
941 Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);
943 // If not, synthesize the offset the hard way.
945 Offset = EmitGEPOffset(GEPLHS);
946 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
947 Constant::getNullValue(Offset->getType()));
948 } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
949 // If the base pointers are different, but the indices are the same, just
950 // compare the base pointer.
951 if (PtrBase != GEPRHS->getOperand(0)) {
952 bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
953 IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
954 GEPRHS->getOperand(0)->getType();
956 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
957 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
958 IndicesTheSame = false;
962 // If all indices are the same, just compare the base pointers.
964 return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));
966 // If we're comparing GEPs with two base pointers that only differ in type
967 // and both GEPs have only constant indices or just one use, then fold
968 // the compare with the adjusted indices.
969 if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
970 (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
971 (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
972 PtrBase->stripPointerCasts() ==
973 GEPRHS->getOperand(0)->stripPointerCasts()) {
974 Value *LOffset = EmitGEPOffset(GEPLHS);
975 Value *ROffset = EmitGEPOffset(GEPRHS);
977 // If we looked through an addrspacecast between different sized address
978 // spaces, the LHS and RHS pointers are different sized
979 // integers. Truncate to the smaller one.
980 Type *LHSIndexTy = LOffset->getType();
981 Type *RHSIndexTy = ROffset->getType();
982 if (LHSIndexTy != RHSIndexTy) {
983 if (LHSIndexTy->getPrimitiveSizeInBits() <
984 RHSIndexTy->getPrimitiveSizeInBits()) {
985 ROffset = Builder->CreateTrunc(ROffset, LHSIndexTy);
987 LOffset = Builder->CreateTrunc(LOffset, RHSIndexTy);
990 Value *Cmp = Builder->CreateICmp(ICmpInst::getSignedPredicate(Cond),
992 return replaceInstUsesWith(I, Cmp);
995 // Otherwise, the base pointers are different and the indices are
996 // different. Try convert this to an indexed compare by looking through
998 return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
1001 // If one of the GEPs has all zero indices, recurse.
1002 if (GEPLHS->hasAllZeroIndices())
1003 return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
1004 ICmpInst::getSwappedPredicate(Cond), I);
1006 // If the other GEP has all zero indices, recurse.
1007 if (GEPRHS->hasAllZeroIndices())
1008 return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
1010 bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
1011 if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
1012 // If the GEPs only differ by one index, compare it.
1013 unsigned NumDifferences = 0; // Keep track of # differences.
1014 unsigned DiffOperand = 0; // The operand that differs.
1015 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
1016 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
1017 if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
1018 GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
1019 // Irreconcilable differences.
1023 if (NumDifferences++) break;
1028 if (NumDifferences == 0) // SAME GEP?
1029 return replaceInstUsesWith(I, // No comparison is needed here.
1030 Builder->getInt1(ICmpInst::isTrueWhenEqual(Cond)));
1032 else if (NumDifferences == 1 && GEPsInBounds) {
1033 Value *LHSV = GEPLHS->getOperand(DiffOperand);
1034 Value *RHSV = GEPRHS->getOperand(DiffOperand);
1035 // Make sure we do a signed comparison here.
1036 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
1040 // Only lower this if the icmp is the only user of the GEP or if we expect
1041 // the result to fold to a constant!
1042 if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
1043 (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
1044 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2)
1045 Value *L = EmitGEPOffset(GEPLHS);
1046 Value *R = EmitGEPOffset(GEPRHS);
1047 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
1051 // Try convert this to an indexed compare by looking through PHIs/casts as a
1053 return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
1056 Instruction *InstCombiner::foldAllocaCmp(ICmpInst &ICI,
1057 const AllocaInst *Alloca,
1058 const Value *Other) {
1059 assert(ICI.isEquality() && "Cannot fold non-equality comparison.");
1061 // It would be tempting to fold away comparisons between allocas and any
1062 // pointer not based on that alloca (e.g. an argument). However, even
1063 // though such pointers cannot alias, they can still compare equal.
1065 // But LLVM doesn't specify where allocas get their memory, so if the alloca
1066 // doesn't escape we can argue that it's impossible to guess its value, and we
1067 // can therefore act as if any such guesses are wrong.
1069 // The code below checks that the alloca doesn't escape, and that it's only
1070 // used in a comparison once (the current instruction). The
1071 // single-comparison-use condition ensures that we're trivially folding all
1072 // comparisons against the alloca consistently, and avoids the risk of
1073 // erroneously folding a comparison of the pointer with itself.
1075 unsigned MaxIter = 32; // Break cycles and bound to constant-time.
1077 SmallVector<const Use *, 32> Worklist;
1078 for (const Use &U : Alloca->uses()) {
1079 if (Worklist.size() >= MaxIter)
1081 Worklist.push_back(&U);
1084 unsigned NumCmps = 0;
1085 while (!Worklist.empty()) {
1086 assert(Worklist.size() <= MaxIter);
1087 const Use *U = Worklist.pop_back_val();
1088 const Value *V = U->getUser();
1091 if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
1092 isa<SelectInst>(V)) {
1094 } else if (isa<LoadInst>(V)) {
1095 // Loading from the pointer doesn't escape it.
1097 } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
1098 // Storing *to* the pointer is fine, but storing the pointer escapes it.
1099 if (SI->getValueOperand() == U->get())
1102 } else if (isa<ICmpInst>(V)) {
1104 return nullptr; // Found more than one cmp.
1106 } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
1107 switch (Intrin->getIntrinsicID()) {
1108 // These intrinsics don't escape or compare the pointer. Memset is safe
1109 // because we don't allow ptrtoint. Memcpy and memmove are safe because
1110 // we don't allow stores, so src cannot point to V.
1111 case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
1112 case Intrinsic::dbg_declare: case Intrinsic::dbg_value:
1113 case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
1121 for (const Use &U : V->uses()) {
1122 if (Worklist.size() >= MaxIter)
1124 Worklist.push_back(&U);
1128 Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
1129 return replaceInstUsesWith(
1131 ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
1134 /// Fold "icmp pred (X+CI), X".
1135 Instruction *InstCombiner::foldICmpAddOpConst(Instruction &ICI,
1136 Value *X, ConstantInt *CI,
1137 ICmpInst::Predicate Pred) {
1138 // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
1139 // so the values can never be equal. Similarly for all other "or equals"
1142 // (X+1) <u X --> X >u (MAXUINT-1) --> X == 255
1143 // (X+2) <u X --> X >u (MAXUINT-2) --> X > 253
1144 // (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0
1145 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
1147 ConstantExpr::getSub(ConstantInt::getAllOnesValue(CI->getType()), CI);
1148 return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
1151 // (X+1) >u X --> X <u (0-1) --> X != 255
1152 // (X+2) >u X --> X <u (0-2) --> X <u 254
1153 // (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0
1154 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
1155 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));
1157 unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
1158 ConstantInt *SMax = ConstantInt::get(X->getContext(),
1159 APInt::getSignedMaxValue(BitWidth));
1161 // (X+ 1) <s X --> X >s (MAXSINT-1) --> X == 127
1162 // (X+ 2) <s X --> X >s (MAXSINT-2) --> X >s 125
1163 // (X+MAXSINT) <s X --> X >s (MAXSINT-MAXSINT) --> X >s 0
1164 // (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1
1165 // (X+ -2) <s X --> X >s (MAXSINT- -2) --> X >s 126
1166 // (X+ -1) <s X --> X >s (MAXSINT- -1) --> X != 127
1167 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
1168 return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI));
1170 // (X+ 1) >s X --> X <s (MAXSINT-(1-1)) --> X != 127
1171 // (X+ 2) >s X --> X <s (MAXSINT-(2-1)) --> X <s 126
1172 // (X+MAXSINT) >s X --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
1173 // (X+MINSINT) >s X --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
1174 // (X+ -2) >s X --> X <s (MAXSINT-(-2-1)) --> X <s -126
1175 // (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128
1177 assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
1178 Constant *C = Builder->getInt(CI->getValue()-1);
1179 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
1182 /// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
1183 /// (icmp eq/ne A, Log2(AP2/AP1)) ->
1184 /// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
1185 Instruction *InstCombiner::foldICmpShrConstConst(ICmpInst &I, Value *A,
1188 assert(I.isEquality() && "Cannot fold icmp gt/lt");
1190 auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
1191 if (I.getPredicate() == I.ICMP_NE)
1192 Pred = CmpInst::getInversePredicate(Pred);
1193 return new ICmpInst(Pred, LHS, RHS);
1196 // Don't bother doing any work for cases which InstSimplify handles.
1200 bool IsAShr = isa<AShrOperator>(I.getOperand(0));
1202 if (AP2.isAllOnesValue())
1204 if (AP2.isNegative() != AP1.isNegative())
1211 // 'A' must be large enough to shift out the highest set bit.
1212 return getICmp(I.ICMP_UGT, A,
1213 ConstantInt::get(A->getType(), AP2.logBase2()));
1216 return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1219 if (IsAShr && AP1.isNegative())
1220 Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
1222 Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();
1225 if (IsAShr && AP1 == AP2.ashr(Shift)) {
1226 // There are multiple solutions if we are comparing against -1 and the LHS
1227 // of the ashr is not a power of two.
1228 if (AP1.isAllOnesValue() && !AP2.isPowerOf2())
1229 return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
1230 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1231 } else if (AP1 == AP2.lshr(Shift)) {
1232 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1236 // Shifting const2 will never be equal to const1.
1237 // FIXME: This should always be handled by InstSimplify?
1238 auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1239 return replaceInstUsesWith(I, TorF);
1242 /// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
1243 /// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
1244 Instruction *InstCombiner::foldICmpShlConstConst(ICmpInst &I, Value *A,
1247 assert(I.isEquality() && "Cannot fold icmp gt/lt");
1249 auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
1250 if (I.getPredicate() == I.ICMP_NE)
1251 Pred = CmpInst::getInversePredicate(Pred);
1252 return new ICmpInst(Pred, LHS, RHS);
1255 // Don't bother doing any work for cases which InstSimplify handles.
1259 unsigned AP2TrailingZeros = AP2.countTrailingZeros();
1261 if (!AP1 && AP2TrailingZeros != 0)
1264 ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));
1267 return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1269 // Get the distance between the lowest bits that are set.
1270 int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;
1272 if (Shift > 0 && AP2.shl(Shift) == AP1)
1273 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1275 // Shifting const2 will never be equal to const1.
1276 // FIXME: This should always be handled by InstSimplify?
1277 auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1278 return replaceInstUsesWith(I, TorF);
1281 /// The caller has matched a pattern of the form:
1282 /// I = icmp ugt (add (add A, B), CI2), CI1
1283 /// If this is of the form:
1285 /// if (sum+128 >u 255)
1286 /// Then replace it with llvm.sadd.with.overflow.i8.
1288 static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
1289 ConstantInt *CI2, ConstantInt *CI1,
1291 // The transformation we're trying to do here is to transform this into an
1292 // llvm.sadd.with.overflow. To do this, we have to replace the original add
1293 // with a narrower add, and discard the add-with-constant that is part of the
1294 // range check (if we can't eliminate it, this isn't profitable).
1296 // In order to eliminate the add-with-constant, the compare can be its only
1298 Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
1299 if (!AddWithCst->hasOneUse())
1302 // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
1303 if (!CI2->getValue().isPowerOf2())
1305 unsigned NewWidth = CI2->getValue().countTrailingZeros();
1306 if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
1309 // The width of the new add formed is 1 more than the bias.
1312 // Check to see that CI1 is an all-ones value with NewWidth bits.
1313 if (CI1->getBitWidth() == NewWidth ||
1314 CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
1317 // This is only really a signed overflow check if the inputs have been
1318 // sign-extended; check for that condition. For example, if CI2 is 2^31 and
1319 // the operands of the add are 64 bits wide, we need at least 33 sign bits.
1320 unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
1321 if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
1322 IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
1325 // In order to replace the original add with a narrower
1326 // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
1327 // and truncates that discard the high bits of the add. Verify that this is
1329 Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
1330 for (User *U : OrigAdd->users()) {
1331 if (U == AddWithCst)
1334 // Only accept truncates for now. We would really like a nice recursive
1335 // predicate like SimplifyDemandedBits, but which goes downwards the use-def
1336 // chain to see which bits of a value are actually demanded. If the
1337 // original add had another add which was then immediately truncated, we
1338 // could still do the transformation.
1339 TruncInst *TI = dyn_cast<TruncInst>(U);
1340 if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
1344 // If the pattern matches, truncate the inputs to the narrower type and
1345 // use the sadd_with_overflow intrinsic to efficiently compute both the
1346 // result and the overflow bit.
1347 Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
1348 Value *F = Intrinsic::getDeclaration(I.getModule(),
1349 Intrinsic::sadd_with_overflow, NewType);
1351 InstCombiner::BuilderTy *Builder = IC.Builder;
1353 // Put the new code above the original add, in case there are any uses of the
1354 // add between the add and the compare.
1355 Builder->SetInsertPoint(OrigAdd);
1357 Value *TruncA = Builder->CreateTrunc(A, NewType, A->getName() + ".trunc");
1358 Value *TruncB = Builder->CreateTrunc(B, NewType, B->getName() + ".trunc");
1359 CallInst *Call = Builder->CreateCall(F, {TruncA, TruncB}, "sadd");
1360 Value *Add = Builder->CreateExtractValue(Call, 0, "sadd.result");
1361 Value *ZExt = Builder->CreateZExt(Add, OrigAdd->getType());
1363 // The inner add was the result of the narrow add, zero extended to the
1364 // wider type. Replace it with the result computed by the intrinsic.
1365 IC.replaceInstUsesWith(*OrigAdd, ZExt);
1367 // The original icmp gets replaced with the overflow value.
1368 return ExtractValueInst::Create(Call, 1, "sadd.overflow");
1371 // Fold icmp Pred X, C.
1372 Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
1373 CmpInst::Predicate Pred = Cmp.getPredicate();
1374 Value *X = Cmp.getOperand(0);
1377 if (!match(Cmp.getOperand(1), m_APInt(C)))
1380 Value *A = nullptr, *B = nullptr;
1382 // Match the following pattern, which is a common idiom when writing
1383 // overflow-safe integer arithmetic functions. The source performs an addition
1384 // in wider type and explicitly checks for overflow using comparisons against
1385 // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
1387 // TODO: This could probably be generalized to handle other overflow-safe
1388 // operations if we worked out the formulas to compute the appropriate magic
1392 // if (sum+128 >u 255) ... -> llvm.sadd.with.overflow.i8
1394 ConstantInt *CI2; // I = icmp ugt (add (add A, B), CI2), CI
1395 if (Pred == ICmpInst::ICMP_UGT &&
1396 match(X, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
1397 if (Instruction *Res = processUGT_ADDCST_ADD(
1398 Cmp, A, B, CI2, cast<ConstantInt>(Cmp.getOperand(1)), *this))
1402 // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0)
1403 if (*C == 0 && Pred == ICmpInst::ICMP_SGT) {
1404 SelectPatternResult SPR = matchSelectPattern(X, A, B);
1405 if (SPR.Flavor == SPF_SMIN) {
1406 if (isKnownPositive(A, DL))
1407 return new ICmpInst(Pred, B, Cmp.getOperand(1));
1408 if (isKnownPositive(B, DL))
1409 return new ICmpInst(Pred, A, Cmp.getOperand(1));
1413 // FIXME: Use m_APInt to allow folds for splat constants.
1414 ConstantInt *CI = dyn_cast<ConstantInt>(Cmp.getOperand(1));
1418 // Canonicalize icmp instructions based on dominating conditions.
1419 BasicBlock *Parent = Cmp.getParent();
1420 BasicBlock *Dom = Parent->getSinglePredecessor();
1421 auto *BI = Dom ? dyn_cast<BranchInst>(Dom->getTerminator()) : nullptr;
1422 ICmpInst::Predicate Pred2;
1423 BasicBlock *TrueBB, *FalseBB;
1425 if (BI && match(BI, m_Br(m_ICmp(Pred2, m_Specific(X), m_ConstantInt(CI2)),
1426 TrueBB, FalseBB)) &&
1427 TrueBB != FalseBB) {
1429 ConstantRange::makeAllowedICmpRegion(Pred, CI->getValue());
1430 ConstantRange DominatingCR =
1432 ? ConstantRange::makeExactICmpRegion(Pred2, CI2->getValue())
1433 : ConstantRange::makeExactICmpRegion(
1434 CmpInst::getInversePredicate(Pred2), CI2->getValue());
1435 ConstantRange Intersection = DominatingCR.intersectWith(CR);
1436 ConstantRange Difference = DominatingCR.difference(CR);
1437 if (Intersection.isEmptySet())
1438 return replaceInstUsesWith(Cmp, Builder->getFalse());
1439 if (Difference.isEmptySet())
1440 return replaceInstUsesWith(Cmp, Builder->getTrue());
1442 // If this is a normal comparison, it demands all bits. If it is a sign
1443 // bit comparison, it only demands the sign bit.
1445 bool IsSignBit = isSignBitCheck(Pred, CI->getValue(), UnusedBit);
1447 // Canonicalizing a sign bit comparison that gets used in a branch,
1448 // pessimizes codegen by generating branch on zero instruction instead
1449 // of a test and branch. So we avoid canonicalizing in such situations
1450 // because test and branch instruction has better branch displacement
1451 // than compare and branch instruction.
1452 if (!isBranchOnSignBitCheck(Cmp, IsSignBit) && !Cmp.isEquality()) {
1453 if (auto *AI = Intersection.getSingleElement())
1454 return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder->getInt(*AI));
1455 if (auto *AD = Difference.getSingleElement())
1456 return new ICmpInst(ICmpInst::ICMP_NE, X, Builder->getInt(*AD));
1463 /// Fold icmp (trunc X, Y), C.
1464 Instruction *InstCombiner::foldICmpTruncConstant(ICmpInst &Cmp,
1467 ICmpInst::Predicate Pred = Cmp.getPredicate();
1468 Value *X = Trunc->getOperand(0);
1469 if (*C == 1 && C->getBitWidth() > 1) {
1470 // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
1472 if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
1473 return new ICmpInst(ICmpInst::ICMP_SLT, V,
1474 ConstantInt::get(V->getType(), 1));
1477 if (Cmp.isEquality() && Trunc->hasOneUse()) {
1478 // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
1479 // of the high bits truncated out of x are known.
1480 unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
1481 SrcBits = X->getType()->getScalarSizeInBits();
1482 APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
1483 computeKnownBits(X, KnownZero, KnownOne, 0, &Cmp);
1485 // If all the high bits are known, we can do this xform.
1486 if ((KnownZero | KnownOne).countLeadingOnes() >= SrcBits - DstBits) {
1487 // Pull in the high bits from known-ones set.
1488 APInt NewRHS = C->zext(SrcBits);
1489 NewRHS |= KnownOne & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
1490 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
1497 /// Fold icmp (xor X, Y), C.
1498 Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp,
1499 BinaryOperator *Xor,
1501 Value *X = Xor->getOperand(0);
1502 Value *Y = Xor->getOperand(1);
1504 if (!match(Y, m_APInt(XorC)))
1507 // If this is a comparison that tests the signbit (X < 0) or (x > -1),
1509 ICmpInst::Predicate Pred = Cmp.getPredicate();
1510 if ((Pred == ICmpInst::ICMP_SLT && *C == 0) ||
1511 (Pred == ICmpInst::ICMP_SGT && C->isAllOnesValue())) {
1513 // If the sign bit of the XorCst is not set, there is no change to
1514 // the operation, just stop using the Xor.
1515 if (!XorC->isNegative()) {
1516 Cmp.setOperand(0, X);
1521 // Was the old condition true if the operand is positive?
1522 bool isTrueIfPositive = Pred == ICmpInst::ICMP_SGT;
1524 // If so, the new one isn't.
1525 isTrueIfPositive ^= true;
1527 Constant *CmpConstant = cast<Constant>(Cmp.getOperand(1));
1528 if (isTrueIfPositive)
1529 return new ICmpInst(ICmpInst::ICMP_SGT, X, SubOne(CmpConstant));
1531 return new ICmpInst(ICmpInst::ICMP_SLT, X, AddOne(CmpConstant));
1534 if (Xor->hasOneUse()) {
1535 // (icmp u/s (xor X SignBit), C) -> (icmp s/u X, (xor C SignBit))
1536 if (!Cmp.isEquality() && XorC->isSignBit()) {
1537 Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
1538 : Cmp.getSignedPredicate();
1539 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), *C ^ *XorC));
1542 // (icmp u/s (xor X ~SignBit), C) -> (icmp s/u X, (xor C ~SignBit))
1543 if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
1544 Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
1545 : Cmp.getSignedPredicate();
1546 Pred = Cmp.getSwappedPredicate(Pred);
1547 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), *C ^ *XorC));
1551 // (icmp ugt (xor X, C), ~C) -> (icmp ult X, C)
1552 // iff -C is a power of 2
1553 if (Pred == ICmpInst::ICMP_UGT && *XorC == ~(*C) && (*C + 1).isPowerOf2())
1554 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
1556 // (icmp ult (xor X, C), -C) -> (icmp uge X, C)
1557 // iff -C is a power of 2
1558 if (Pred == ICmpInst::ICMP_ULT && *XorC == -(*C) && C->isPowerOf2())
1559 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);
1564 /// Fold icmp (and (sh X, Y), C2), C1.
1565 Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
1566 const APInt *C1, const APInt *C2) {
1567 BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
1568 if (!Shift || !Shift->isShift())
1571 // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
1572 // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
1573 // code produced by the clang front-end, for bitfield access.
1574 // This seemingly simple opportunity to fold away a shift turns out to be
1575 // rather complicated. See PR17827 for details.
1576 unsigned ShiftOpcode = Shift->getOpcode();
1577 bool IsShl = ShiftOpcode == Instruction::Shl;
1579 if (match(Shift->getOperand(1), m_APInt(C3))) {
1580 bool CanFold = false;
1581 if (ShiftOpcode == Instruction::AShr) {
1582 // There may be some constraints that make this possible, but nothing
1583 // simple has been discovered yet.
1585 } else if (ShiftOpcode == Instruction::Shl) {
1586 // For a left shift, we can fold if the comparison is not signed. We can
1587 // also fold a signed comparison if the mask value and comparison value
1588 // are not negative. These constraints may not be obvious, but we can
1589 // prove that they are correct using an SMT solver.
1590 if (!Cmp.isSigned() || (!C2->isNegative() && !C1->isNegative()))
1592 } else if (ShiftOpcode == Instruction::LShr) {
1593 // For a logical right shift, we can fold if the comparison is not signed.
1594 // We can also fold a signed comparison if the shifted mask value and the
1595 // shifted comparison value are not negative. These constraints may not be
1596 // obvious, but we can prove that they are correct using an SMT solver.
1597 if (!Cmp.isSigned() ||
1598 (!C2->shl(*C3).isNegative() && !C1->shl(*C3).isNegative()))
1603 APInt NewCst = IsShl ? C1->lshr(*C3) : C1->shl(*C3);
1604 APInt SameAsC1 = IsShl ? NewCst.shl(*C3) : NewCst.lshr(*C3);
1605 // Check to see if we are shifting out any of the bits being compared.
1606 if (SameAsC1 != *C1) {
1607 // If we shifted bits out, the fold is not going to work out. As a
1608 // special case, check to see if this means that the result is always
1609 // true or false now.
1610 if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
1611 return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
1612 if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
1613 return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
1615 Cmp.setOperand(1, ConstantInt::get(And->getType(), NewCst));
1616 APInt NewAndCst = IsShl ? C2->lshr(*C3) : C2->shl(*C3);
1617 And->setOperand(1, ConstantInt::get(And->getType(), NewAndCst));
1618 And->setOperand(0, Shift->getOperand(0));
1619 Worklist.Add(Shift); // Shift is dead.
1625 // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is
1626 // preferable because it allows the C2 << Y expression to be hoisted out of a
1627 // loop if Y is invariant and X is not.
1628 if (Shift->hasOneUse() && *C1 == 0 && Cmp.isEquality() &&
1629 !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
1632 IsShl ? Builder->CreateLShr(And->getOperand(1), Shift->getOperand(1))
1633 : Builder->CreateShl(And->getOperand(1), Shift->getOperand(1));
1635 // Compute X & (C2 << Y).
1636 Value *NewAnd = Builder->CreateAnd(Shift->getOperand(0), NewShift);
1637 Cmp.setOperand(0, NewAnd);
1644 /// Fold icmp (and X, C2), C1.
1645 Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
1646 BinaryOperator *And,
1649 if (!match(And->getOperand(1), m_APInt(C2)))
1652 if (!And->hasOneUse() || !And->getOperand(0)->hasOneUse())
1655 // If the LHS is an 'and' of a truncate and we can widen the and/compare to
1656 // the input width without changing the value produced, eliminate the cast:
1658 // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
1660 // We can do this transformation if the constants do not have their sign bits
1661 // set or if it is an equality comparison. Extending a relational comparison
1662 // when we're checking the sign bit would not work.
1664 if (match(And->getOperand(0), m_Trunc(m_Value(W))) &&
1665 (Cmp.isEquality() || (!C1->isNegative() && !C2->isNegative()))) {
1666 // TODO: Is this a good transform for vectors? Wider types may reduce
1667 // throughput. Should this transform be limited (even for scalars) by using
1668 // shouldChangeType()?
1669 if (!Cmp.getType()->isVectorTy()) {
1670 Type *WideType = W->getType();
1671 unsigned WideScalarBits = WideType->getScalarSizeInBits();
1672 Constant *ZextC1 = ConstantInt::get(WideType, C1->zext(WideScalarBits));
1673 Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1674 Value *NewAnd = Builder->CreateAnd(W, ZextC2, And->getName());
1675 return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1679 if (Instruction *I = foldICmpAndShift(Cmp, And, C1, C2))
1682 // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
1683 // (icmp pred (and A, (or (shl 1, B), 1), 0))
1685 // iff pred isn't signed
1686 if (!Cmp.isSigned() && *C1 == 0 && match(And->getOperand(1), m_One())) {
1687 Constant *One = cast<Constant>(And->getOperand(1));
1688 Value *Or = And->getOperand(0);
1689 Value *A, *B, *LShr;
1690 if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1691 match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1692 unsigned UsesRemoved = 0;
1693 if (And->hasOneUse())
1695 if (Or->hasOneUse())
1697 if (LShr->hasOneUse())
1700 // Compute A & ((1 << B) | 1)
1701 Value *NewOr = nullptr;
1702 if (auto *C = dyn_cast<Constant>(B)) {
1703 if (UsesRemoved >= 1)
1704 NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
1706 if (UsesRemoved >= 3)
1707 NewOr = Builder->CreateOr(Builder->CreateShl(One, B, LShr->getName(),
1709 One, Or->getName());
1712 Value *NewAnd = Builder->CreateAnd(A, NewOr, And->getName());
1713 Cmp.setOperand(0, NewAnd);
1719 // (X & C2) > C1 --> (X & C2) != 0, if any bit set in (X & C2) will produce a
1720 // result greater than C1.
1721 unsigned NumTZ = C2->countTrailingZeros();
1722 if (Cmp.getPredicate() == ICmpInst::ICMP_UGT && NumTZ < C2->getBitWidth() &&
1723 APInt::getOneBitSet(C2->getBitWidth(), NumTZ).ugt(*C1)) {
1724 Constant *Zero = Constant::getNullValue(And->getType());
1725 return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
1731 /// Fold icmp (and X, Y), C.
1732 Instruction *InstCombiner::foldICmpAndConstant(ICmpInst &Cmp,
1733 BinaryOperator *And,
1735 if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1738 // TODO: These all require that Y is constant too, so refactor with the above.
1740 // Try to optimize things like "A[i] & 42 == 0" to index computations.
1741 Value *X = And->getOperand(0);
1742 Value *Y = And->getOperand(1);
1743 if (auto *LI = dyn_cast<LoadInst>(X))
1744 if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1745 if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
1746 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
1747 !LI->isVolatile() && isa<ConstantInt>(Y)) {
1748 ConstantInt *C2 = cast<ConstantInt>(Y);
1749 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2))
1753 if (!Cmp.isEquality())
1756 // X & -C == -C -> X > u ~C
1757 // X & -C != -C -> X <= u ~C
1758 // iff C is a power of 2
1759 if (Cmp.getOperand(1) == Y && (-(*C)).isPowerOf2()) {
1760 auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT
1761 : CmpInst::ICMP_ULE;
1762 return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
1765 // (X & C2) == 0 -> (trunc X) >= 0
1766 // (X & C2) != 0 -> (trunc X) < 0
1767 // iff C2 is a power of 2 and it masks the sign bit of a legal integer type.
1769 if (And->hasOneUse() && *C == 0 && match(Y, m_APInt(C2))) {
1770 int32_t ExactLogBase2 = C2->exactLogBase2();
1771 if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) {
1772 Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
1773 if (And->getType()->isVectorTy())
1774 NTy = VectorType::get(NTy, And->getType()->getVectorNumElements());
1775 Value *Trunc = Builder->CreateTrunc(X, NTy);
1776 auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
1777 : CmpInst::ICMP_SLT;
1778 return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy));
1785 /// Fold icmp (or X, Y), C.
1786 Instruction *InstCombiner::foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
1788 ICmpInst::Predicate Pred = Cmp.getPredicate();
1790 // icmp slt signum(V) 1 --> icmp slt V, 1
1792 if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
1793 return new ICmpInst(ICmpInst::ICMP_SLT, V,
1794 ConstantInt::get(V->getType(), 1));
1797 // X | C == C --> X <=u C
1798 // X | C != C --> X >u C
1799 // iff C+1 is a power of 2 (C is a bitmask of the low bits)
1800 if (Cmp.isEquality() && Cmp.getOperand(1) == Or->getOperand(1) &&
1801 (*C + 1).isPowerOf2()) {
1802 Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
1803 return new ICmpInst(Pred, Or->getOperand(0), Or->getOperand(1));
1806 if (!Cmp.isEquality() || *C != 0 || !Or->hasOneUse())
1810 if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
1811 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
1812 // -> and (icmp eq P, null), (icmp eq Q, null).
1814 Builder->CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
1816 Builder->CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
1817 auto LogicOpc = Pred == ICmpInst::Predicate::ICMP_EQ ? Instruction::And
1819 return BinaryOperator::Create(LogicOpc, CmpP, CmpQ);
1825 /// Fold icmp (mul X, Y), C.
1826 Instruction *InstCombiner::foldICmpMulConstant(ICmpInst &Cmp,
1827 BinaryOperator *Mul,
1830 if (!match(Mul->getOperand(1), m_APInt(MulC)))
1833 // If this is a test of the sign bit and the multiply is sign-preserving with
1834 // a constant operand, use the multiply LHS operand instead.
1835 ICmpInst::Predicate Pred = Cmp.getPredicate();
1836 if (isSignTest(Pred, *C) && Mul->hasNoSignedWrap()) {
1837 if (MulC->isNegative())
1838 Pred = ICmpInst::getSwappedPredicate(Pred);
1839 return new ICmpInst(Pred, Mul->getOperand(0),
1840 Constant::getNullValue(Mul->getType()));
1846 /// Fold icmp (shl 1, Y), C.
1847 static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl,
1850 if (!match(Shl, m_Shl(m_One(), m_Value(Y))))
1853 Type *ShiftType = Shl->getType();
1854 uint32_t TypeBits = C->getBitWidth();
1855 bool CIsPowerOf2 = C->isPowerOf2();
1856 ICmpInst::Predicate Pred = Cmp.getPredicate();
1857 if (Cmp.isUnsigned()) {
1858 // (1 << Y) pred C -> Y pred Log2(C)
1860 // (1 << Y) < 30 -> Y <= 4
1861 // (1 << Y) <= 30 -> Y <= 4
1862 // (1 << Y) >= 30 -> Y > 4
1863 // (1 << Y) > 30 -> Y > 4
1864 if (Pred == ICmpInst::ICMP_ULT)
1865 Pred = ICmpInst::ICMP_ULE;
1866 else if (Pred == ICmpInst::ICMP_UGE)
1867 Pred = ICmpInst::ICMP_UGT;
1870 // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31
1871 // (1 << Y) < 2147483648 -> Y < 31 -> Y != 31
1872 unsigned CLog2 = C->logBase2();
1873 if (CLog2 == TypeBits - 1) {
1874 if (Pred == ICmpInst::ICMP_UGE)
1875 Pred = ICmpInst::ICMP_EQ;
1876 else if (Pred == ICmpInst::ICMP_ULT)
1877 Pred = ICmpInst::ICMP_NE;
1879 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
1880 } else if (Cmp.isSigned()) {
1881 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
1882 if (C->isAllOnesValue()) {
1883 // (1 << Y) <= -1 -> Y == 31
1884 if (Pred == ICmpInst::ICMP_SLE)
1885 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
1887 // (1 << Y) > -1 -> Y != 31
1888 if (Pred == ICmpInst::ICMP_SGT)
1889 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
1891 // (1 << Y) < 0 -> Y == 31
1892 // (1 << Y) <= 0 -> Y == 31
1893 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
1894 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
1896 // (1 << Y) >= 0 -> Y != 31
1897 // (1 << Y) > 0 -> Y != 31
1898 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
1899 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
1901 } else if (Cmp.isEquality() && CIsPowerOf2) {
1902 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C->logBase2()));
1908 /// Fold icmp (shl X, Y), C.
1909 Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp,
1910 BinaryOperator *Shl,
1912 const APInt *ShiftVal;
1913 if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
1914 return foldICmpShlConstConst(Cmp, Shl->getOperand(1), *C, *ShiftVal);
1916 const APInt *ShiftAmt;
1917 if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
1918 return foldICmpShlOne(Cmp, Shl, C);
1920 // Check that the shift amount is in range. If not, don't perform undefined
1921 // shifts. When the shift is visited, it will be simplified.
1922 unsigned TypeBits = C->getBitWidth();
1923 if (ShiftAmt->uge(TypeBits))
1926 ICmpInst::Predicate Pred = Cmp.getPredicate();
1927 Value *X = Shl->getOperand(0);
1928 Type *ShType = Shl->getType();
1930 // NSW guarantees that we are only shifting out sign bits from the high bits,
1931 // so we can ASHR the compare constant without needing a mask and eliminate
1933 if (Shl->hasNoSignedWrap()) {
1934 if (Pred == ICmpInst::ICMP_SGT) {
1935 // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
1936 APInt ShiftedC = C->ashr(*ShiftAmt);
1937 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
1939 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) {
1940 // This is the same code as the SGT case, but assert the pre-condition
1941 // that is needed for this to work with equality predicates.
1942 assert(C->ashr(*ShiftAmt).shl(*ShiftAmt) == *C &&
1943 "Compare known true or false was not folded");
1944 APInt ShiftedC = C->ashr(*ShiftAmt);
1945 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
1947 if (Pred == ICmpInst::ICMP_SLT) {
1948 // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
1949 // (X << S) <=s C is equiv to X <=s (C >> S) for all C
1950 // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
1951 // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
1952 assert(!C->isMinSignedValue() && "Unexpected icmp slt");
1953 APInt ShiftedC = (*C - 1).ashr(*ShiftAmt) + 1;
1954 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
1956 // If this is a signed comparison to 0 and the shift is sign preserving,
1957 // use the shift LHS operand instead; isSignTest may change 'Pred', so only
1958 // do that if we're sure to not continue on in this function.
1959 if (isSignTest(Pred, *C))
1960 return new ICmpInst(Pred, X, Constant::getNullValue(ShType));
1963 // NUW guarantees that we are only shifting out zero bits from the high bits,
1964 // so we can LSHR the compare constant without needing a mask and eliminate
1966 if (Shl->hasNoUnsignedWrap()) {
1967 if (Pred == ICmpInst::ICMP_UGT) {
1968 // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
1969 APInt ShiftedC = C->lshr(*ShiftAmt);
1970 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
1972 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) {
1973 // This is the same code as the UGT case, but assert the pre-condition
1974 // that is needed for this to work with equality predicates.
1975 assert(C->lshr(*ShiftAmt).shl(*ShiftAmt) == *C &&
1976 "Compare known true or false was not folded");
1977 APInt ShiftedC = C->lshr(*ShiftAmt);
1978 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
1980 if (Pred == ICmpInst::ICMP_ULT) {
1981 // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
1982 // (X << S) <=u C is equiv to X <=u (C >> S) for all C
1983 // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
1984 // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
1985 assert(C->ugt(0) && "ult 0 should have been eliminated");
1986 APInt ShiftedC = (*C - 1).lshr(*ShiftAmt) + 1;
1987 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
1991 if (Cmp.isEquality() && Shl->hasOneUse()) {
1992 // Strength-reduce the shift into an 'and'.
1993 Constant *Mask = ConstantInt::get(
1995 APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
1996 Value *And = Builder->CreateAnd(X, Mask, Shl->getName() + ".mask");
1997 Constant *LShrC = ConstantInt::get(ShType, C->lshr(*ShiftAmt));
1998 return new ICmpInst(Pred, And, LShrC);
2001 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
2002 bool TrueIfSigned = false;
2003 if (Shl->hasOneUse() && isSignBitCheck(Pred, *C, TrueIfSigned)) {
2004 // (X << 31) <s 0 --> (X & 1) != 0
2005 Constant *Mask = ConstantInt::get(
2007 APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
2008 Value *And = Builder->CreateAnd(X, Mask, Shl->getName() + ".mask");
2009 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
2010 And, Constant::getNullValue(ShType));
2013 // Transform (icmp pred iM (shl iM %v, N), C)
2014 // -> (icmp pred i(M-N) (trunc %v iM to i(M-N)), (trunc (C>>N))
2015 // Transform the shl to a trunc if (trunc (C>>N)) has no loss and M-N.
2016 // This enables us to get rid of the shift in favor of a trunc that may be
2017 // free on the target. It has the additional benefit of comparing to a
2018 // smaller constant that may be more target-friendly.
2019 unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
2020 if (Shl->hasOneUse() && Amt != 0 && C->countTrailingZeros() >= Amt &&
2021 DL.isLegalInteger(TypeBits - Amt)) {
2022 Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
2023 if (ShType->isVectorTy())
2024 TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements());
2026 ConstantInt::get(TruncTy, C->ashr(*ShiftAmt).trunc(TypeBits - Amt));
2027 return new ICmpInst(Pred, Builder->CreateTrunc(X, TruncTy), NewC);
2033 /// Fold icmp ({al}shr X, Y), C.
2034 Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp,
2035 BinaryOperator *Shr,
2037 // An exact shr only shifts out zero bits, so:
2038 // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
2039 Value *X = Shr->getOperand(0);
2040 CmpInst::Predicate Pred = Cmp.getPredicate();
2041 if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() && *C == 0)
2042 return new ICmpInst(Pred, X, Cmp.getOperand(1));
2044 const APInt *ShiftVal;
2045 if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal)))
2046 return foldICmpShrConstConst(Cmp, Shr->getOperand(1), *C, *ShiftVal);
2048 const APInt *ShiftAmt;
2049 if (!match(Shr->getOperand(1), m_APInt(ShiftAmt)))
2052 // Check that the shift amount is in range. If not, don't perform undefined
2053 // shifts. When the shift is visited it will be simplified.
2054 unsigned TypeBits = C->getBitWidth();
2055 unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits);
2056 if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2059 bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2060 if (!Cmp.isEquality()) {
2061 // If we have an unsigned comparison and an ashr, we can't simplify this.
2062 // Similarly for signed comparisons with lshr.
2063 if (Cmp.isSigned() != IsAShr)
2066 // Otherwise, all lshr and most exact ashr's are equivalent to a udiv/sdiv
2067 // by a power of 2. Since we already have logic to simplify these,
2068 // transform to div and then simplify the resultant comparison.
2069 if (IsAShr && (!Shr->isExact() || ShAmtVal == TypeBits - 1))
2072 // Revisit the shift (to delete it).
2075 Constant *DivCst = ConstantInt::get(
2076 Shr->getType(), APInt::getOneBitSet(TypeBits, ShAmtVal));
2078 Value *Tmp = IsAShr ? Builder->CreateSDiv(X, DivCst, "", Shr->isExact())
2079 : Builder->CreateUDiv(X, DivCst, "", Shr->isExact());
2081 Cmp.setOperand(0, Tmp);
2083 // If the builder folded the binop, just return it.
2084 BinaryOperator *TheDiv = dyn_cast<BinaryOperator>(Tmp);
2088 // Otherwise, fold this div/compare.
2089 assert(TheDiv->getOpcode() == Instruction::SDiv ||
2090 TheDiv->getOpcode() == Instruction::UDiv);
2092 Instruction *Res = foldICmpDivConstant(Cmp, TheDiv, C);
2093 assert(Res && "This div/cst should have folded!");
2097 // Handle equality comparisons of shift-by-constant.
2099 // If the comparison constant changes with the shift, the comparison cannot
2100 // succeed (bits of the comparison constant cannot match the shifted value).
2101 // This should be known by InstSimplify and already be folded to true/false.
2102 assert(((IsAShr && C->shl(ShAmtVal).ashr(ShAmtVal) == *C) ||
2103 (!IsAShr && C->shl(ShAmtVal).lshr(ShAmtVal) == *C)) &&
2104 "Expected icmp+shr simplify did not occur.");
2106 // Check if the bits shifted out are known to be zero. If so, we can compare
2107 // against the unshifted value:
2108 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
2109 Constant *ShiftedCmpRHS = ConstantInt::get(Shr->getType(), *C << ShAmtVal);
2110 if (Shr->hasOneUse()) {
2112 return new ICmpInst(Pred, X, ShiftedCmpRHS);
2114 // Otherwise strength reduce the shift into an 'and'.
2115 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
2116 Constant *Mask = ConstantInt::get(Shr->getType(), Val);
2117 Value *And = Builder->CreateAnd(X, Mask, Shr->getName() + ".mask");
2118 return new ICmpInst(Pred, And, ShiftedCmpRHS);
2124 /// Fold icmp (udiv X, Y), C.
2125 Instruction *InstCombiner::foldICmpUDivConstant(ICmpInst &Cmp,
2126 BinaryOperator *UDiv,
2129 if (!match(UDiv->getOperand(0), m_APInt(C2)))
2132 assert(C2 != 0 && "udiv 0, X should have been simplified already.");
2134 // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
2135 Value *Y = UDiv->getOperand(1);
2136 if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) {
2137 assert(!C->isMaxValue() &&
2138 "icmp ugt X, UINT_MAX should have been simplified already.");
2139 return new ICmpInst(ICmpInst::ICMP_ULE, Y,
2140 ConstantInt::get(Y->getType(), C2->udiv(*C + 1)));
2143 // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
2144 if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) {
2145 assert(C != 0 && "icmp ult X, 0 should have been simplified already.");
2146 return new ICmpInst(ICmpInst::ICMP_UGT, Y,
2147 ConstantInt::get(Y->getType(), C2->udiv(*C)));
2153 /// Fold icmp ({su}div X, Y), C.
2154 Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
2155 BinaryOperator *Div,
2157 // Fold: icmp pred ([us]div X, C2), C -> range test
2158 // Fold this div into the comparison, producing a range check.
2159 // Determine, based on the divide type, what the range is being
2160 // checked. If there is an overflow on the low or high side, remember
2161 // it, otherwise compute the range [low, hi) bounding the new value.
2162 // See: InsertRangeTest above for the kinds of replacements possible.
2164 if (!match(Div->getOperand(1), m_APInt(C2)))
2167 // FIXME: If the operand types don't match the type of the divide
2168 // then don't attempt this transform. The code below doesn't have the
2169 // logic to deal with a signed divide and an unsigned compare (and
2170 // vice versa). This is because (x /s C2) <s C produces different
2171 // results than (x /s C2) <u C or (x /u C2) <s C or even
2172 // (x /u C2) <u C. Simply casting the operands and result won't
2173 // work. :( The if statement below tests that condition and bails
2175 bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2176 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2179 // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2180 // INT_MIN will also fail if the divisor is 1. Although folds of all these
2181 // division-by-constant cases should be present, we can not assert that they
2182 // have happened before we reach this icmp instruction.
2183 if (*C2 == 0 || *C2 == 1 || (DivIsSigned && C2->isAllOnesValue()))
2186 // TODO: We could do all of the computations below using APInt.
2187 Constant *CmpRHS = cast<Constant>(Cmp.getOperand(1));
2188 Constant *DivRHS = cast<Constant>(Div->getOperand(1));
2190 // Compute Prod = CmpRHS * DivRHS. We are essentially solving an equation of
2191 // form X / C2 = C. We solve for X by multiplying C2 (DivRHS) and C (CmpRHS).
2192 // By solving for X, we can turn this into a range check instead of computing
2194 Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);
2196 // Determine if the product overflows by seeing if the product is not equal to
2197 // the divide. Make sure we do the same kind of divide as in the LHS
2198 // instruction that we're folding.
2199 bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS)
2200 : ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;
2202 ICmpInst::Predicate Pred = Cmp.getPredicate();
2204 // If the division is known to be exact, then there is no remainder from the
2205 // divide, so the covered range size is unit, otherwise it is the divisor.
2206 Constant *RangeSize =
2207 Div->isExact() ? ConstantInt::get(Div->getType(), 1) : DivRHS;
2209 // Figure out the interval that is being checked. For example, a comparison
2210 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2211 // Compute this interval based on the constants involved and the signedness of
2212 // the compare/divide. This computes a half-open interval, keeping track of
2213 // whether either value in the interval overflows. After analysis each
2214 // overflow variable is set to 0 if it's corresponding bound variable is valid
2215 // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
2216 int LoOverflow = 0, HiOverflow = 0;
2217 Constant *LoBound = nullptr, *HiBound = nullptr;
2219 if (!DivIsSigned) { // udiv
2220 // e.g. X/5 op 3 --> [15, 20)
2222 HiOverflow = LoOverflow = ProdOV;
2224 // If this is not an exact divide, then many values in the range collapse
2225 // to the same result value.
2226 HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
2228 } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2229 if (*C == 0) { // (X / pos) op 0
2230 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
2231 LoBound = ConstantExpr::getNeg(SubOne(RangeSize));
2232 HiBound = RangeSize;
2233 } else if (C->isStrictlyPositive()) { // (X / pos) op pos
2234 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
2235 HiOverflow = LoOverflow = ProdOV;
2237 HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2238 } else { // (X / pos) op neg
2239 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
2240 HiBound = AddOne(Prod);
2241 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2243 Constant *DivNeg = ConstantExpr::getNeg(RangeSize);
2244 LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
2247 } else if (C2->isNegative()) { // Divisor is < 0.
2249 RangeSize = ConstantExpr::getNeg(RangeSize);
2250 if (*C == 0) { // (X / neg) op 0
2251 // e.g. X/-5 op 0 --> [-4, 5)
2252 LoBound = AddOne(RangeSize);
2253 HiBound = ConstantExpr::getNeg(RangeSize);
2254 if (HiBound == DivRHS) { // -INTMIN = INTMIN
2255 HiOverflow = 1; // [INTMIN+1, overflow)
2256 HiBound = nullptr; // e.g. X/INTMIN = 0 --> X > INTMIN
2258 } else if (C->isStrictlyPositive()) { // (X / neg) op pos
2259 // e.g. X/-5 op 3 --> [-19, -14)
2260 HiBound = AddOne(Prod);
2261 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2263 LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0;
2264 } else { // (X / neg) op neg
2265 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
2266 LoOverflow = HiOverflow = ProdOV;
2268 HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
2271 // Dividing by a negative swaps the condition. LT <-> GT
2272 Pred = ICmpInst::getSwappedPredicate(Pred);
2275 Value *X = Div->getOperand(0);
2277 default: llvm_unreachable("Unhandled icmp opcode!");
2278 case ICmpInst::ICMP_EQ:
2279 if (LoOverflow && HiOverflow)
2280 return replaceInstUsesWith(Cmp, Builder->getFalse());
2282 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2283 ICmpInst::ICMP_UGE, X, LoBound);
2285 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2286 ICmpInst::ICMP_ULT, X, HiBound);
2287 return replaceInstUsesWith(
2288 Cmp, insertRangeTest(X, LoBound->getUniqueInteger(),
2289 HiBound->getUniqueInteger(), DivIsSigned, true));
2290 case ICmpInst::ICMP_NE:
2291 if (LoOverflow && HiOverflow)
2292 return replaceInstUsesWith(Cmp, Builder->getTrue());
2294 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2295 ICmpInst::ICMP_ULT, X, LoBound);
2297 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2298 ICmpInst::ICMP_UGE, X, HiBound);
2299 return replaceInstUsesWith(Cmp,
2300 insertRangeTest(X, LoBound->getUniqueInteger(),
2301 HiBound->getUniqueInteger(),
2302 DivIsSigned, false));
2303 case ICmpInst::ICMP_ULT:
2304 case ICmpInst::ICMP_SLT:
2305 if (LoOverflow == +1) // Low bound is greater than input range.
2306 return replaceInstUsesWith(Cmp, Builder->getTrue());
2307 if (LoOverflow == -1) // Low bound is less than input range.
2308 return replaceInstUsesWith(Cmp, Builder->getFalse());
2309 return new ICmpInst(Pred, X, LoBound);
2310 case ICmpInst::ICMP_UGT:
2311 case ICmpInst::ICMP_SGT:
2312 if (HiOverflow == +1) // High bound greater than input range.
2313 return replaceInstUsesWith(Cmp, Builder->getFalse());
2314 if (HiOverflow == -1) // High bound less than input range.
2315 return replaceInstUsesWith(Cmp, Builder->getTrue());
2316 if (Pred == ICmpInst::ICMP_UGT)
2317 return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
2318 return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
2324 /// Fold icmp (sub X, Y), C.
2325 Instruction *InstCombiner::foldICmpSubConstant(ICmpInst &Cmp,
2326 BinaryOperator *Sub,
2328 Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
2329 ICmpInst::Predicate Pred = Cmp.getPredicate();
2331 // The following transforms are only worth it if the only user of the subtract
2333 if (!Sub->hasOneUse())
2336 if (Sub->hasNoSignedWrap()) {
2337 // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
2338 if (Pred == ICmpInst::ICMP_SGT && C->isAllOnesValue())
2339 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
2341 // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
2342 if (Pred == ICmpInst::ICMP_SGT && *C == 0)
2343 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
2345 // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
2346 if (Pred == ICmpInst::ICMP_SLT && *C == 0)
2347 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
2349 // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
2350 if (Pred == ICmpInst::ICMP_SLT && *C == 1)
2351 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
2355 if (!match(X, m_APInt(C2)))
2358 // C2 - Y <u C -> (Y | (C - 1)) == C2
2359 // iff (C2 & (C - 1)) == C - 1 and C is a power of 2
2360 if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() &&
2361 (*C2 & (*C - 1)) == (*C - 1))
2362 return new ICmpInst(ICmpInst::ICMP_EQ, Builder->CreateOr(Y, *C - 1), X);
2364 // C2 - Y >u C -> (Y | C) != C2
2365 // iff C2 & C == C and C + 1 is a power of 2
2366 if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == *C)
2367 return new ICmpInst(ICmpInst::ICMP_NE, Builder->CreateOr(Y, *C), X);
2372 /// Fold icmp (add X, Y), C.
2373 Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp,
2374 BinaryOperator *Add,
2376 Value *Y = Add->getOperand(1);
2378 if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
2381 // Fold icmp pred (add X, C2), C.
2382 Value *X = Add->getOperand(0);
2383 Type *Ty = Add->getType();
2384 CmpInst::Predicate Pred = Cmp.getPredicate();
2386 // If the add does not wrap, we can always adjust the compare by subtracting
2387 // the constants. Equality comparisons are handled elsewhere. SGE/SLE are
2388 // canonicalized to SGT/SLT.
2389 if (Add->hasNoSignedWrap() &&
2390 (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) {
2392 APInt NewC = C->ssub_ov(*C2, Overflow);
2393 // If there is overflow, the result must be true or false.
2394 // TODO: Can we assert there is no overflow because InstSimplify always
2395 // handles those cases?
2397 // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
2398 return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
2401 auto CR = ConstantRange::makeExactICmpRegion(Pred, *C).subtract(*C2);
2402 const APInt &Upper = CR.getUpper();
2403 const APInt &Lower = CR.getLower();
2404 if (Cmp.isSigned()) {
2405 if (Lower.isSignBit())
2406 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
2407 if (Upper.isSignBit())
2408 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
2410 if (Lower.isMinValue())
2411 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
2412 if (Upper.isMinValue())
2413 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
2416 if (!Add->hasOneUse())
2419 // X+C <u C2 -> (X & -C2) == C
2420 // iff C & (C2-1) == 0
2421 // C2 is a power of 2
2422 if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() && (*C2 & (*C - 1)) == 0)
2423 return new ICmpInst(ICmpInst::ICMP_EQ, Builder->CreateAnd(X, -(*C)),
2424 ConstantExpr::getNeg(cast<Constant>(Y)));
2426 // X+C >u C2 -> (X & ~C2) != C
2428 // C2+1 is a power of 2
2429 if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == 0)
2430 return new ICmpInst(ICmpInst::ICMP_NE, Builder->CreateAnd(X, ~(*C)),
2431 ConstantExpr::getNeg(cast<Constant>(Y)));
2436 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C
2437 /// where X is some kind of instruction.
2438 Instruction *InstCombiner::foldICmpInstWithConstant(ICmpInst &Cmp) {
2440 if (!match(Cmp.getOperand(1), m_APInt(C)))
2444 if (match(Cmp.getOperand(0), m_BinOp(BO))) {
2445 switch (BO->getOpcode()) {
2446 case Instruction::Xor:
2447 if (Instruction *I = foldICmpXorConstant(Cmp, BO, C))
2450 case Instruction::And:
2451 if (Instruction *I = foldICmpAndConstant(Cmp, BO, C))
2454 case Instruction::Or:
2455 if (Instruction *I = foldICmpOrConstant(Cmp, BO, C))
2458 case Instruction::Mul:
2459 if (Instruction *I = foldICmpMulConstant(Cmp, BO, C))
2462 case Instruction::Shl:
2463 if (Instruction *I = foldICmpShlConstant(Cmp, BO, C))
2466 case Instruction::LShr:
2467 case Instruction::AShr:
2468 if (Instruction *I = foldICmpShrConstant(Cmp, BO, C))
2471 case Instruction::UDiv:
2472 if (Instruction *I = foldICmpUDivConstant(Cmp, BO, C))
2475 case Instruction::SDiv:
2476 if (Instruction *I = foldICmpDivConstant(Cmp, BO, C))
2479 case Instruction::Sub:
2480 if (Instruction *I = foldICmpSubConstant(Cmp, BO, C))
2483 case Instruction::Add:
2484 if (Instruction *I = foldICmpAddConstant(Cmp, BO, C))
2490 // TODO: These folds could be refactored to be part of the above calls.
2491 if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, C))
2496 if (match(Cmp.getOperand(0), m_Instruction(LHSI)) &&
2497 LHSI->getOpcode() == Instruction::Trunc)
2498 if (Instruction *I = foldICmpTruncConstant(Cmp, LHSI, C))
2501 if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, C))
2507 /// Fold an icmp equality instruction with binary operator LHS and constant RHS:
2508 /// icmp eq/ne BO, C.
2509 Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
2512 // TODO: Some of these folds could work with arbitrary constants, but this
2513 // function is limited to scalar and vector splat constants.
2514 if (!Cmp.isEquality())
2517 ICmpInst::Predicate Pred = Cmp.getPredicate();
2518 bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
2519 Constant *RHS = cast<Constant>(Cmp.getOperand(1));
2520 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
2522 switch (BO->getOpcode()) {
2523 case Instruction::SRem:
2524 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
2525 if (*C == 0 && BO->hasOneUse()) {
2527 if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
2528 Value *NewRem = Builder->CreateURem(BOp0, BOp1, BO->getName());
2529 return new ICmpInst(Pred, NewRem,
2530 Constant::getNullValue(BO->getType()));
2534 case Instruction::Add: {
2535 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
2537 if (match(BOp1, m_APInt(BOC))) {
2538 if (BO->hasOneUse()) {
2539 Constant *SubC = ConstantExpr::getSub(RHS, cast<Constant>(BOp1));
2540 return new ICmpInst(Pred, BOp0, SubC);
2542 } else if (*C == 0) {
2543 // Replace ((add A, B) != 0) with (A != -B) if A or B is
2544 // efficiently invertible, or if the add has just this one use.
2545 if (Value *NegVal = dyn_castNegVal(BOp1))
2546 return new ICmpInst(Pred, BOp0, NegVal);
2547 if (Value *NegVal = dyn_castNegVal(BOp0))
2548 return new ICmpInst(Pred, NegVal, BOp1);
2549 if (BO->hasOneUse()) {
2550 Value *Neg = Builder->CreateNeg(BOp1);
2552 return new ICmpInst(Pred, BOp0, Neg);
2557 case Instruction::Xor:
2558 if (BO->hasOneUse()) {
2559 if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
2560 // For the xor case, we can xor two constants together, eliminating
2561 // the explicit xor.
2562 return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
2563 } else if (*C == 0) {
2564 // Replace ((xor A, B) != 0) with (A != B)
2565 return new ICmpInst(Pred, BOp0, BOp1);
2569 case Instruction::Sub:
2570 if (BO->hasOneUse()) {
2572 if (match(BOp0, m_APInt(BOC))) {
2573 // Replace ((sub BOC, B) != C) with (B != BOC-C).
2574 Constant *SubC = ConstantExpr::getSub(cast<Constant>(BOp0), RHS);
2575 return new ICmpInst(Pred, BOp1, SubC);
2576 } else if (*C == 0) {
2577 // Replace ((sub A, B) != 0) with (A != B).
2578 return new ICmpInst(Pred, BOp0, BOp1);
2582 case Instruction::Or: {
2584 if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
2585 // Comparing if all bits outside of a constant mask are set?
2586 // Replace (X | C) == -1 with (X & ~C) == ~C.
2587 // This removes the -1 constant.
2588 Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
2589 Value *And = Builder->CreateAnd(BOp0, NotBOC);
2590 return new ICmpInst(Pred, And, NotBOC);
2594 case Instruction::And: {
2596 if (match(BOp1, m_APInt(BOC))) {
2597 // If we have ((X & C) == C), turn it into ((X & C) != 0).
2598 if (C == BOC && C->isPowerOf2())
2599 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
2600 BO, Constant::getNullValue(RHS->getType()));
2602 // Don't perform the following transforms if the AND has multiple uses
2603 if (!BO->hasOneUse())
2606 // Replace (and X, (1 << size(X)-1) != 0) with x s< 0
2607 if (BOC->isSignBit()) {
2608 Constant *Zero = Constant::getNullValue(BOp0->getType());
2609 auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
2610 return new ICmpInst(NewPred, BOp0, Zero);
2613 // ((X & ~7) == 0) --> X < 8
2614 if (*C == 0 && (~(*BOC) + 1).isPowerOf2()) {
2615 Constant *NegBOC = ConstantExpr::getNeg(cast<Constant>(BOp1));
2616 auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
2617 return new ICmpInst(NewPred, BOp0, NegBOC);
2622 case Instruction::Mul:
2623 if (*C == 0 && BO->hasNoSignedWrap()) {
2625 if (match(BOp1, m_APInt(BOC)) && *BOC != 0) {
2626 // The trivial case (mul X, 0) is handled by InstSimplify.
2627 // General case : (mul X, C) != 0 iff X != 0
2628 // (mul X, C) == 0 iff X == 0
2629 return new ICmpInst(Pred, BOp0, Constant::getNullValue(RHS->getType()));
2633 case Instruction::UDiv:
2635 // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
2636 auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
2637 return new ICmpInst(NewPred, BOp1, BOp0);
2646 /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
2647 Instruction *InstCombiner::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
2649 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0));
2650 if (!II || !Cmp.isEquality())
2653 // Handle icmp {eq|ne} <intrinsic>, intcst.
2654 switch (II->getIntrinsicID()) {
2655 case Intrinsic::bswap:
2657 Cmp.setOperand(0, II->getArgOperand(0));
2658 Cmp.setOperand(1, Builder->getInt(C->byteSwap()));
2660 case Intrinsic::ctlz:
2661 case Intrinsic::cttz:
2662 // ctz(A) == bitwidth(A) -> A == 0 and likewise for !=
2663 if (*C == C->getBitWidth()) {
2665 Cmp.setOperand(0, II->getArgOperand(0));
2666 Cmp.setOperand(1, ConstantInt::getNullValue(II->getType()));
2670 case Intrinsic::ctpop: {
2671 // popcount(A) == 0 -> A == 0 and likewise for !=
2672 // popcount(A) == bitwidth(A) -> A == -1 and likewise for !=
2673 bool IsZero = *C == 0;
2674 if (IsZero || *C == C->getBitWidth()) {
2676 Cmp.setOperand(0, II->getArgOperand(0));
2677 auto *NewOp = IsZero ? Constant::getNullValue(II->getType())
2678 : Constant::getAllOnesValue(II->getType());
2679 Cmp.setOperand(1, NewOp);
2690 /// Handle icmp with constant (but not simple integer constant) RHS.
2691 Instruction *InstCombiner::foldICmpInstWithConstantNotInt(ICmpInst &I) {
2692 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2693 Constant *RHSC = dyn_cast<Constant>(Op1);
2694 Instruction *LHSI = dyn_cast<Instruction>(Op0);
2698 switch (LHSI->getOpcode()) {
2699 case Instruction::GetElementPtr:
2700 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
2701 if (RHSC->isNullValue() &&
2702 cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
2703 return new ICmpInst(
2704 I.getPredicate(), LHSI->getOperand(0),
2705 Constant::getNullValue(LHSI->getOperand(0)->getType()));
2707 case Instruction::PHI:
2708 // Only fold icmp into the PHI if the phi and icmp are in the same
2709 // block. If in the same block, we're encouraging jump threading. If
2710 // not, we are just pessimizing the code by making an i1 phi.
2711 if (LHSI->getParent() == I.getParent())
2712 if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
2715 case Instruction::Select: {
2716 // If either operand of the select is a constant, we can fold the
2717 // comparison into the select arms, which will cause one to be
2718 // constant folded and the select turned into a bitwise or.
2719 Value *Op1 = nullptr, *Op2 = nullptr;
2720 ConstantInt *CI = nullptr;
2721 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
2722 Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
2723 CI = dyn_cast<ConstantInt>(Op1);
2725 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
2726 Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
2727 CI = dyn_cast<ConstantInt>(Op2);
2730 // We only want to perform this transformation if it will not lead to
2731 // additional code. This is true if either both sides of the select
2732 // fold to a constant (in which case the icmp is replaced with a select
2733 // which will usually simplify) or this is the only user of the
2734 // select (in which case we are trading a select+icmp for a simpler
2735 // select+icmp) or all uses of the select can be replaced based on
2736 // dominance information ("Global cases").
2737 bool Transform = false;
2740 else if (Op1 || Op2) {
2742 if (LHSI->hasOneUse())
2745 else if (CI && !CI->isZero())
2746 // When Op1 is constant try replacing select with second operand.
2747 // Otherwise Op2 is constant and try replacing select with first
2750 replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1);
2754 Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC,
2757 Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC,
2759 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
2763 case Instruction::IntToPtr:
2764 // icmp pred inttoptr(X), null -> icmp pred X, 0
2765 if (RHSC->isNullValue() &&
2766 DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
2767 return new ICmpInst(
2768 I.getPredicate(), LHSI->getOperand(0),
2769 Constant::getNullValue(LHSI->getOperand(0)->getType()));
2772 case Instruction::Load:
2773 // Try to optimize things like "A[i] > 4" to index computations.
2774 if (GetElementPtrInst *GEP =
2775 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
2776 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
2777 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
2778 !cast<LoadInst>(LHSI)->isVolatile())
2779 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
2788 /// Try to fold icmp (binop), X or icmp X, (binop).
2789 Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) {
2790 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2792 // Special logic for binary operators.
2793 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
2794 BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
2798 CmpInst::Predicate Pred = I.getPredicate();
2799 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
2800 if (BO0 && isa<OverflowingBinaryOperator>(BO0))
2802 ICmpInst::isEquality(Pred) ||
2803 (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
2804 (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
2805 if (BO1 && isa<OverflowingBinaryOperator>(BO1))
2807 ICmpInst::isEquality(Pred) ||
2808 (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
2809 (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
2811 // Analyze the case when either Op0 or Op1 is an add instruction.
2812 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
2813 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2814 if (BO0 && BO0->getOpcode() == Instruction::Add) {
2815 A = BO0->getOperand(0);
2816 B = BO0->getOperand(1);
2818 if (BO1 && BO1->getOpcode() == Instruction::Add) {
2819 C = BO1->getOperand(0);
2820 D = BO1->getOperand(1);
2823 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
2824 if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
2825 return new ICmpInst(Pred, A == Op1 ? B : A,
2826 Constant::getNullValue(Op1->getType()));
2828 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
2829 if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
2830 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
2833 // icmp (X+Y), (X+Z) -> icmp Y, Z for equalities or if there is no overflow.
2834 if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
2836 // Try not to increase register pressure.
2837 BO0->hasOneUse() && BO1->hasOneUse()) {
2838 // Determine Y and Z in the form icmp (X+Y), (X+Z).
2841 // C + B == C + D -> B == D
2844 } else if (A == D) {
2845 // D + B == C + D -> B == C
2848 } else if (B == C) {
2849 // A + C == C + D -> A == D
2854 // A + D == C + D -> A == C
2858 return new ICmpInst(Pred, Y, Z);
2861 // icmp slt (X + -1), Y -> icmp sle X, Y
2862 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT &&
2863 match(B, m_AllOnes()))
2864 return new ICmpInst(CmpInst::ICMP_SLE, A, Op1);
2866 // icmp sge (X + -1), Y -> icmp sgt X, Y
2867 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE &&
2868 match(B, m_AllOnes()))
2869 return new ICmpInst(CmpInst::ICMP_SGT, A, Op1);
2871 // icmp sle (X + 1), Y -> icmp slt X, Y
2872 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One()))
2873 return new ICmpInst(CmpInst::ICMP_SLT, A, Op1);
2875 // icmp sgt (X + 1), Y -> icmp sge X, Y
2876 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One()))
2877 return new ICmpInst(CmpInst::ICMP_SGE, A, Op1);
2879 // icmp sgt X, (Y + -1) -> icmp sge X, Y
2880 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT &&
2881 match(D, m_AllOnes()))
2882 return new ICmpInst(CmpInst::ICMP_SGE, Op0, C);
2884 // icmp sle X, (Y + -1) -> icmp slt X, Y
2885 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE &&
2886 match(D, m_AllOnes()))
2887 return new ICmpInst(CmpInst::ICMP_SLT, Op0, C);
2889 // icmp sge X, (Y + 1) -> icmp sgt X, Y
2890 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One()))
2891 return new ICmpInst(CmpInst::ICMP_SGT, Op0, C);
2893 // icmp slt X, (Y + 1) -> icmp sle X, Y
2894 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One()))
2895 return new ICmpInst(CmpInst::ICMP_SLE, Op0, C);
2897 // TODO: The subtraction-related identities shown below also hold, but
2898 // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
2899 // wouldn't happen even if they were implemented.
2901 // icmp ult (X - 1), Y -> icmp ule X, Y
2902 // icmp uge (X - 1), Y -> icmp ugt X, Y
2903 // icmp ugt X, (Y - 1) -> icmp uge X, Y
2904 // icmp ule X, (Y - 1) -> icmp ult X, Y
2906 // icmp ule (X + 1), Y -> icmp ult X, Y
2907 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One()))
2908 return new ICmpInst(CmpInst::ICMP_ULT, A, Op1);
2910 // icmp ugt (X + 1), Y -> icmp uge X, Y
2911 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One()))
2912 return new ICmpInst(CmpInst::ICMP_UGE, A, Op1);
2914 // icmp uge X, (Y + 1) -> icmp ugt X, Y
2915 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One()))
2916 return new ICmpInst(CmpInst::ICMP_UGT, Op0, C);
2918 // icmp ult X, (Y + 1) -> icmp ule X, Y
2919 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One()))
2920 return new ICmpInst(CmpInst::ICMP_ULE, Op0, C);
2922 // if C1 has greater magnitude than C2:
2923 // icmp (X + C1), (Y + C2) -> icmp (X + C3), Y
2924 // s.t. C3 = C1 - C2
2926 // if C2 has greater magnitude than C1:
2927 // icmp (X + C1), (Y + C2) -> icmp X, (Y + C3)
2928 // s.t. C3 = C2 - C1
2929 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
2930 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned())
2931 if (ConstantInt *C1 = dyn_cast<ConstantInt>(B))
2932 if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) {
2933 const APInt &AP1 = C1->getValue();
2934 const APInt &AP2 = C2->getValue();
2935 if (AP1.isNegative() == AP2.isNegative()) {
2936 APInt AP1Abs = C1->getValue().abs();
2937 APInt AP2Abs = C2->getValue().abs();
2938 if (AP1Abs.uge(AP2Abs)) {
2939 ConstantInt *C3 = Builder->getInt(AP1 - AP2);
2940 Value *NewAdd = Builder->CreateNSWAdd(A, C3);
2941 return new ICmpInst(Pred, NewAdd, C);
2943 ConstantInt *C3 = Builder->getInt(AP2 - AP1);
2944 Value *NewAdd = Builder->CreateNSWAdd(C, C3);
2945 return new ICmpInst(Pred, A, NewAdd);
2950 // Analyze the case when either Op0 or Op1 is a sub instruction.
2951 // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
2956 if (BO0 && BO0->getOpcode() == Instruction::Sub) {
2957 A = BO0->getOperand(0);
2958 B = BO0->getOperand(1);
2960 if (BO1 && BO1->getOpcode() == Instruction::Sub) {
2961 C = BO1->getOperand(0);
2962 D = BO1->getOperand(1);
2965 // icmp (X-Y), X -> icmp 0, Y for equalities or if there is no overflow.
2966 if (A == Op1 && NoOp0WrapProblem)
2967 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
2969 // icmp X, (X-Y) -> icmp Y, 0 for equalities or if there is no overflow.
2970 if (C == Op0 && NoOp1WrapProblem)
2971 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
2973 // icmp (Y-X), (Z-X) -> icmp Y, Z for equalities or if there is no overflow.
2974 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem &&
2975 // Try not to increase register pressure.
2976 BO0->hasOneUse() && BO1->hasOneUse())
2977 return new ICmpInst(Pred, A, C);
2979 // icmp (X-Y), (X-Z) -> icmp Z, Y for equalities or if there is no overflow.
2980 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem &&
2981 // Try not to increase register pressure.
2982 BO0->hasOneUse() && BO1->hasOneUse())
2983 return new ICmpInst(Pred, D, B);
2985 // icmp (0-X) < cst --> x > -cst
2986 if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
2988 if (match(BO0, m_Neg(m_Value(X))))
2989 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1))
2990 if (!RHSC->isMinValue(/*isSigned=*/true))
2991 return new ICmpInst(I.getSwappedPredicate(), X,
2992 ConstantExpr::getNeg(RHSC));
2995 BinaryOperator *SRem = nullptr;
2996 // icmp (srem X, Y), Y
2997 if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
2999 // icmp Y, (srem X, Y)
3000 else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
3001 Op0 == BO1->getOperand(1))
3004 // We don't check hasOneUse to avoid increasing register pressure because
3005 // the value we use is the same value this instruction was already using.
3006 switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
3009 case ICmpInst::ICMP_EQ:
3010 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
3011 case ICmpInst::ICMP_NE:
3012 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
3013 case ICmpInst::ICMP_SGT:
3014 case ICmpInst::ICMP_SGE:
3015 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
3016 Constant::getAllOnesValue(SRem->getType()));
3017 case ICmpInst::ICMP_SLT:
3018 case ICmpInst::ICMP_SLE:
3019 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
3020 Constant::getNullValue(SRem->getType()));
3024 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() &&
3025 BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) {
3026 switch (BO0->getOpcode()) {
3029 case Instruction::Add:
3030 case Instruction::Sub:
3031 case Instruction::Xor:
3032 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
3033 return new ICmpInst(I.getPredicate(), BO0->getOperand(0),
3034 BO1->getOperand(0));
3035 // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
3036 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO0->getOperand(1))) {
3037 if (CI->getValue().isSignBit()) {
3038 ICmpInst::Predicate Pred =
3039 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate();
3040 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3043 if (BO0->getOpcode() == Instruction::Xor && CI->isMaxValue(true)) {
3044 ICmpInst::Predicate Pred =
3045 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate();
3046 Pred = I.getSwappedPredicate(Pred);
3047 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3051 case Instruction::Mul:
3052 if (!I.isEquality())
3055 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO0->getOperand(1))) {
3056 // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
3057 // Mask = -1 >> count-trailing-zeros(Cst).
3058 if (!CI->isZero() && !CI->isOne()) {
3059 const APInt &AP = CI->getValue();
3060 ConstantInt *Mask = ConstantInt::get(
3062 APInt::getLowBitsSet(AP.getBitWidth(),
3063 AP.getBitWidth() - AP.countTrailingZeros()));
3064 Value *And1 = Builder->CreateAnd(BO0->getOperand(0), Mask);
3065 Value *And2 = Builder->CreateAnd(BO1->getOperand(0), Mask);
3066 return new ICmpInst(I.getPredicate(), And1, And2);
3070 case Instruction::UDiv:
3071 case Instruction::LShr:
3075 case Instruction::SDiv:
3076 case Instruction::AShr:
3077 if (!BO0->isExact() || !BO1->isExact())
3079 return new ICmpInst(I.getPredicate(), BO0->getOperand(0),
3080 BO1->getOperand(0));
3081 case Instruction::Shl: {
3082 bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap();
3083 bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap();
3086 if (!NSW && I.isSigned())
3088 return new ICmpInst(I.getPredicate(), BO0->getOperand(0),
3089 BO1->getOperand(0));
3095 // Transform A & (L - 1) `ult` L --> L != 0
3096 auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
3098 m_CombineOr(m_And(m_Value(), LSubOne), m_And(LSubOne, m_Value()));
3100 if (match(BO0, BitwiseAnd) && I.getPredicate() == ICmpInst::ICMP_ULT) {
3101 auto *Zero = Constant::getNullValue(BO0->getType());
3102 return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
3109 /// Fold icmp Pred min|max(X, Y), X.
3110 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) {
3111 ICmpInst::Predicate Pred = Cmp.getPredicate();
3112 Value *Op0 = Cmp.getOperand(0);
3113 Value *X = Cmp.getOperand(1);
3115 // Canonicalize minimum or maximum operand to LHS of the icmp.
3116 if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) ||
3117 match(X, m_c_SMax(m_Specific(Op0), m_Value())) ||
3118 match(X, m_c_UMin(m_Specific(Op0), m_Value())) ||
3119 match(X, m_c_UMax(m_Specific(Op0), m_Value()))) {
3121 Pred = Cmp.getSwappedPredicate();
3125 if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) {
3126 // smin(X, Y) == X --> X s<= Y
3127 // smin(X, Y) s>= X --> X s<= Y
3128 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE)
3129 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
3131 // smin(X, Y) != X --> X s> Y
3132 // smin(X, Y) s< X --> X s> Y
3133 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT)
3134 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
3136 // These cases should be handled in InstSimplify:
3137 // smin(X, Y) s<= X --> true
3138 // smin(X, Y) s> X --> false
3142 if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) {
3143 // smax(X, Y) == X --> X s>= Y
3144 // smax(X, Y) s<= X --> X s>= Y
3145 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE)
3146 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
3148 // smax(X, Y) != X --> X s< Y
3149 // smax(X, Y) s> X --> X s< Y
3150 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT)
3151 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
3153 // These cases should be handled in InstSimplify:
3154 // smax(X, Y) s>= X --> true
3155 // smax(X, Y) s< X --> false
3159 if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) {
3160 // umin(X, Y) == X --> X u<= Y
3161 // umin(X, Y) u>= X --> X u<= Y
3162 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE)
3163 return new ICmpInst(ICmpInst::ICMP_ULE, X, Y);
3165 // umin(X, Y) != X --> X u> Y
3166 // umin(X, Y) u< X --> X u> Y
3167 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT)
3168 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
3170 // These cases should be handled in InstSimplify:
3171 // umin(X, Y) u<= X --> true
3172 // umin(X, Y) u> X --> false
3176 if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) {
3177 // umax(X, Y) == X --> X u>= Y
3178 // umax(X, Y) u<= X --> X u>= Y
3179 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE)
3180 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);
3182 // umax(X, Y) != X --> X u< Y
3183 // umax(X, Y) u> X --> X u< Y
3184 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT)
3185 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
3187 // These cases should be handled in InstSimplify:
3188 // umax(X, Y) u>= X --> true
3189 // umax(X, Y) u< X --> false
3196 Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
3197 if (!I.isEquality())
3200 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3201 Value *A, *B, *C, *D;
3202 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
3203 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
3204 Value *OtherVal = A == Op1 ? B : A;
3205 return new ICmpInst(I.getPredicate(), OtherVal,
3206 Constant::getNullValue(A->getType()));
3209 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
3210 // A^c1 == C^c2 --> A == C^(c1^c2)
3211 ConstantInt *C1, *C2;
3212 if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
3214 Constant *NC = Builder->getInt(C1->getValue() ^ C2->getValue());
3215 Value *Xor = Builder->CreateXor(C, NC);
3216 return new ICmpInst(I.getPredicate(), A, Xor);
3219 // A^B == A^D -> B == D
3221 return new ICmpInst(I.getPredicate(), B, D);
3223 return new ICmpInst(I.getPredicate(), B, C);
3225 return new ICmpInst(I.getPredicate(), A, D);
3227 return new ICmpInst(I.getPredicate(), A, C);
3231 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
3232 // A == (A^B) -> B == 0
3233 Value *OtherVal = A == Op0 ? B : A;
3234 return new ICmpInst(I.getPredicate(), OtherVal,
3235 Constant::getNullValue(A->getType()));
3238 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
3239 if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
3240 match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
3241 Value *X = nullptr, *Y = nullptr, *Z = nullptr;
3247 } else if (A == D) {
3251 } else if (B == C) {
3255 } else if (B == D) {
3261 if (X) { // Build (X^Y) & Z
3262 Op1 = Builder->CreateXor(X, Y);
3263 Op1 = Builder->CreateAnd(Op1, Z);
3264 I.setOperand(0, Op1);
3265 I.setOperand(1, Constant::getNullValue(Op1->getType()));
3270 // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B)
3271 // and (B & (1<<X)-1) == (zext A) --> A == (trunc B)
3273 if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) &&
3274 match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) ||
3275 (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) &&
3276 match(Op1, m_ZExt(m_Value(A))))) {
3277 APInt Pow2 = Cst1->getValue() + 1;
3278 if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
3279 Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
3280 return new ICmpInst(I.getPredicate(), A,
3281 Builder->CreateTrunc(B, A->getType()));
3284 // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
3285 // For lshr and ashr pairs.
3286 if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) &&
3287 match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) ||
3288 (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) &&
3289 match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) {
3290 unsigned TypeBits = Cst1->getBitWidth();
3291 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
3292 if (ShAmt < TypeBits && ShAmt != 0) {
3293 ICmpInst::Predicate Pred = I.getPredicate() == ICmpInst::ICMP_NE
3294 ? ICmpInst::ICMP_UGE
3295 : ICmpInst::ICMP_ULT;
3296 Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted");
3297 APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
3298 return new ICmpInst(Pred, Xor, Builder->getInt(CmpVal));
3302 // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
3303 if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
3304 match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
3305 unsigned TypeBits = Cst1->getBitWidth();
3306 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
3307 if (ShAmt < TypeBits && ShAmt != 0) {
3308 Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted");
3309 APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
3310 Value *And = Builder->CreateAnd(Xor, Builder->getInt(AndVal),
3311 I.getName() + ".mask");
3312 return new ICmpInst(I.getPredicate(), And,
3313 Constant::getNullValue(Cst1->getType()));
3317 // Transform "icmp eq (trunc (lshr(X, cst1)), cst" to
3318 // "icmp (and X, mask), cst"
3320 if (Op0->hasOneUse() &&
3321 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
3322 match(Op1, m_ConstantInt(Cst1)) &&
3323 // Only do this when A has multiple uses. This is most important to do
3324 // when it exposes other optimizations.
3326 unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
3328 if (ShAmt < ASize) {
3330 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
3333 APInt CmpV = Cst1->getValue().zext(ASize);
3336 Value *Mask = Builder->CreateAnd(A, Builder->getInt(MaskV));
3337 return new ICmpInst(I.getPredicate(), Mask, Builder->getInt(CmpV));
3344 /// Handle icmp (cast x to y), (cast/cst). We only handle extending casts so
3346 Instruction *InstCombiner::foldICmpWithCastAndCast(ICmpInst &ICmp) {
3347 const CastInst *LHSCI = cast<CastInst>(ICmp.getOperand(0));
3348 Value *LHSCIOp = LHSCI->getOperand(0);
3349 Type *SrcTy = LHSCIOp->getType();
3350 Type *DestTy = LHSCI->getType();
3353 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
3354 // integer type is the same size as the pointer type.
3355 if (LHSCI->getOpcode() == Instruction::PtrToInt &&
3356 DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
3357 Value *RHSOp = nullptr;
3358 if (auto *RHSC = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
3359 Value *RHSCIOp = RHSC->getOperand(0);
3360 if (RHSCIOp->getType()->getPointerAddressSpace() ==
3361 LHSCIOp->getType()->getPointerAddressSpace()) {
3362 RHSOp = RHSC->getOperand(0);
3363 // If the pointer types don't match, insert a bitcast.
3364 if (LHSCIOp->getType() != RHSOp->getType())
3365 RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType());
3367 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
3368 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
3372 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSOp);
3375 // The code below only handles extension cast instructions, so far.
3377 if (LHSCI->getOpcode() != Instruction::ZExt &&
3378 LHSCI->getOpcode() != Instruction::SExt)
3381 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
3382 bool isSignedCmp = ICmp.isSigned();
3384 if (auto *CI = dyn_cast<CastInst>(ICmp.getOperand(1))) {
3385 // Not an extension from the same type?
3386 RHSCIOp = CI->getOperand(0);
3387 if (RHSCIOp->getType() != LHSCIOp->getType())
3390 // If the signedness of the two casts doesn't agree (i.e. one is a sext
3391 // and the other is a zext), then we can't handle this.
3392 if (CI->getOpcode() != LHSCI->getOpcode())
3395 // Deal with equality cases early.
3396 if (ICmp.isEquality())
3397 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp);
3399 // A signed comparison of sign extended values simplifies into a
3400 // signed comparison.
3401 if (isSignedCmp && isSignedExt)
3402 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp);
3404 // The other three cases all fold into an unsigned comparison.
3405 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
3408 // If we aren't dealing with a constant on the RHS, exit early.
3409 auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
3413 // Compute the constant that would happen if we truncated to SrcTy then
3414 // re-extended to DestTy.
3415 Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy);
3416 Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(), Res1, DestTy);
3418 // If the re-extended constant didn't change...
3420 // Deal with equality cases early.
3421 if (ICmp.isEquality())
3422 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1);
3424 // A signed comparison of sign extended values simplifies into a
3425 // signed comparison.
3426 if (isSignedExt && isSignedCmp)
3427 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1);
3429 // The other three cases all fold into an unsigned comparison.
3430 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, Res1);
3433 // The re-extended constant changed, partly changed (in the case of a vector),
3434 // or could not be determined to be equal (in the case of a constant
3435 // expression), so the constant cannot be represented in the shorter type.
3436 // Consequently, we cannot emit a simple comparison.
3437 // All the cases that fold to true or false will have already been handled
3438 // by SimplifyICmpInst, so only deal with the tricky case.
3440 if (isSignedCmp || !isSignedExt || !isa<ConstantInt>(C))
3443 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases
3444 // should have been folded away previously and not enter in here.
3446 // We're performing an unsigned comp with a sign extended value.
3447 // This is true if the input is >= 0. [aka >s -1]
3448 Constant *NegOne = Constant::getAllOnesValue(SrcTy);
3449 Value *Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICmp.getName());
3451 // Finally, return the value computed.
3452 if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
3453 return replaceInstUsesWith(ICmp, Result);
3455 assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
3456 return BinaryOperator::CreateNot(Result);
3459 bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
3460 Value *RHS, Instruction &OrigI,
3461 Value *&Result, Constant *&Overflow) {
3462 if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
3463 std::swap(LHS, RHS);
3465 auto SetResult = [&](Value *OpResult, Constant *OverflowVal, bool ReuseName) {
3467 Overflow = OverflowVal;
3469 Result->takeName(&OrigI);
3473 // If the overflow check was an add followed by a compare, the insertion point
3474 // may be pointing to the compare. We want to insert the new instructions
3475 // before the add in case there are uses of the add between the add and the
3477 Builder->SetInsertPoint(&OrigI);
3481 llvm_unreachable("bad overflow check kind!");
3483 case OCF_UNSIGNED_ADD: {
3484 OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigI);
3485 if (OR == OverflowResult::NeverOverflows)
3486 return SetResult(Builder->CreateNUWAdd(LHS, RHS), Builder->getFalse(),
3489 if (OR == OverflowResult::AlwaysOverflows)
3490 return SetResult(Builder->CreateAdd(LHS, RHS), Builder->getTrue(), true);
3492 // Fall through uadd into sadd
3495 case OCF_SIGNED_ADD: {
3496 // X + 0 -> {X, false}
3497 if (match(RHS, m_Zero()))
3498 return SetResult(LHS, Builder->getFalse(), false);
3500 // We can strength reduce this signed add into a regular add if we can prove
3501 // that it will never overflow.
3502 if (OCF == OCF_SIGNED_ADD)
3503 if (WillNotOverflowSignedAdd(LHS, RHS, OrigI))
3504 return SetResult(Builder->CreateNSWAdd(LHS, RHS), Builder->getFalse(),
3509 case OCF_UNSIGNED_SUB:
3510 case OCF_SIGNED_SUB: {
3511 // X - 0 -> {X, false}
3512 if (match(RHS, m_Zero()))
3513 return SetResult(LHS, Builder->getFalse(), false);
3515 if (OCF == OCF_SIGNED_SUB) {
3516 if (WillNotOverflowSignedSub(LHS, RHS, OrigI))
3517 return SetResult(Builder->CreateNSWSub(LHS, RHS), Builder->getFalse(),
3520 if (WillNotOverflowUnsignedSub(LHS, RHS, OrigI))
3521 return SetResult(Builder->CreateNUWSub(LHS, RHS), Builder->getFalse(),
3527 case OCF_UNSIGNED_MUL: {
3528 OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, &OrigI);
3529 if (OR == OverflowResult::NeverOverflows)
3530 return SetResult(Builder->CreateNUWMul(LHS, RHS), Builder->getFalse(),
3532 if (OR == OverflowResult::AlwaysOverflows)
3533 return SetResult(Builder->CreateMul(LHS, RHS), Builder->getTrue(), true);
3536 case OCF_SIGNED_MUL:
3537 // X * undef -> undef
3538 if (isa<UndefValue>(RHS))
3539 return SetResult(RHS, UndefValue::get(Builder->getInt1Ty()), false);
3541 // X * 0 -> {0, false}
3542 if (match(RHS, m_Zero()))
3543 return SetResult(RHS, Builder->getFalse(), false);
3545 // X * 1 -> {X, false}
3546 if (match(RHS, m_One()))
3547 return SetResult(LHS, Builder->getFalse(), false);
3549 if (OCF == OCF_SIGNED_MUL)
3550 if (WillNotOverflowSignedMul(LHS, RHS, OrigI))
3551 return SetResult(Builder->CreateNSWMul(LHS, RHS), Builder->getFalse(),
3559 /// \brief Recognize and process idiom involving test for multiplication
3562 /// The caller has matched a pattern of the form:
3563 /// I = cmp u (mul(zext A, zext B), V
3564 /// The function checks if this is a test for overflow and if so replaces
3565 /// multiplication with call to 'mul.with.overflow' intrinsic.
3567 /// \param I Compare instruction.
3568 /// \param MulVal Result of 'mult' instruction. It is one of the arguments of
3569 /// the compare instruction. Must be of integer type.
3570 /// \param OtherVal The other argument of compare instruction.
3571 /// \returns Instruction which must replace the compare instruction, NULL if no
3572 /// replacement required.
3573 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
3574 Value *OtherVal, InstCombiner &IC) {
3575 // Don't bother doing this transformation for pointers, don't do it for
3577 if (!isa<IntegerType>(MulVal->getType()))
3580 assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
3581 assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
3582 auto *MulInstr = dyn_cast<Instruction>(MulVal);
3585 assert(MulInstr->getOpcode() == Instruction::Mul);
3587 auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
3588 *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
3589 assert(LHS->getOpcode() == Instruction::ZExt);
3590 assert(RHS->getOpcode() == Instruction::ZExt);
3591 Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
3593 // Calculate type and width of the result produced by mul.with.overflow.
3594 Type *TyA = A->getType(), *TyB = B->getType();
3595 unsigned WidthA = TyA->getPrimitiveSizeInBits(),
3596 WidthB = TyB->getPrimitiveSizeInBits();
3599 if (WidthB > WidthA) {
3607 // In order to replace the original mul with a narrower mul.with.overflow,
3608 // all uses must ignore upper bits of the product. The number of used low
3609 // bits must be not greater than the width of mul.with.overflow.
3610 if (MulVal->hasNUsesOrMore(2))
3611 for (User *U : MulVal->users()) {
3614 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
3615 // Check if truncation ignores bits above MulWidth.
3616 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
3617 if (TruncWidth > MulWidth)
3619 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
3620 // Check if AND ignores bits above MulWidth.
3621 if (BO->getOpcode() != Instruction::And)
3623 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
3624 const APInt &CVal = CI->getValue();
3625 if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
3629 // Other uses prohibit this transformation.
3634 // Recognize patterns
3635 switch (I.getPredicate()) {
3636 case ICmpInst::ICMP_EQ:
3637 case ICmpInst::ICMP_NE:
3638 // Recognize pattern:
3639 // mulval = mul(zext A, zext B)
3640 // cmp eq/neq mulval, zext trunc mulval
3641 if (ZExtInst *Zext = dyn_cast<ZExtInst>(OtherVal))
3642 if (Zext->hasOneUse()) {
3643 Value *ZextArg = Zext->getOperand(0);
3644 if (TruncInst *Trunc = dyn_cast<TruncInst>(ZextArg))
3645 if (Trunc->getType()->getPrimitiveSizeInBits() == MulWidth)
3649 // Recognize pattern:
3650 // mulval = mul(zext A, zext B)
3651 // cmp eq/neq mulval, and(mulval, mask), mask selects low MulWidth bits.
3654 if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
3655 if (ValToMask != MulVal)
3657 const APInt &CVal = CI->getValue() + 1;
3658 if (CVal.isPowerOf2()) {
3659 unsigned MaskWidth = CVal.logBase2();
3660 if (MaskWidth == MulWidth)
3661 break; // Recognized
3666 case ICmpInst::ICMP_UGT:
3667 // Recognize pattern:
3668 // mulval = mul(zext A, zext B)
3669 // cmp ugt mulval, max
3670 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3671 APInt MaxVal = APInt::getMaxValue(MulWidth);
3672 MaxVal = MaxVal.zext(CI->getBitWidth());
3673 if (MaxVal.eq(CI->getValue()))
3674 break; // Recognized
3678 case ICmpInst::ICMP_UGE:
3679 // Recognize pattern:
3680 // mulval = mul(zext A, zext B)
3681 // cmp uge mulval, max+1
3682 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3683 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
3684 if (MaxVal.eq(CI->getValue()))
3685 break; // Recognized
3689 case ICmpInst::ICMP_ULE:
3690 // Recognize pattern:
3691 // mulval = mul(zext A, zext B)
3692 // cmp ule mulval, max
3693 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3694 APInt MaxVal = APInt::getMaxValue(MulWidth);
3695 MaxVal = MaxVal.zext(CI->getBitWidth());
3696 if (MaxVal.eq(CI->getValue()))
3697 break; // Recognized
3701 case ICmpInst::ICMP_ULT:
3702 // Recognize pattern:
3703 // mulval = mul(zext A, zext B)
3704 // cmp ule mulval, max + 1
3705 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3706 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
3707 if (MaxVal.eq(CI->getValue()))
3708 break; // Recognized
3716 InstCombiner::BuilderTy *Builder = IC.Builder;
3717 Builder->SetInsertPoint(MulInstr);
3719 // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
3720 Value *MulA = A, *MulB = B;
3721 if (WidthA < MulWidth)
3722 MulA = Builder->CreateZExt(A, MulType);
3723 if (WidthB < MulWidth)
3724 MulB = Builder->CreateZExt(B, MulType);
3725 Value *F = Intrinsic::getDeclaration(I.getModule(),
3726 Intrinsic::umul_with_overflow, MulType);
3727 CallInst *Call = Builder->CreateCall(F, {MulA, MulB}, "umul");
3728 IC.Worklist.Add(MulInstr);
3730 // If there are uses of mul result other than the comparison, we know that
3731 // they are truncation or binary AND. Change them to use result of
3732 // mul.with.overflow and adjust properly mask/size.
3733 if (MulVal->hasNUsesOrMore(2)) {
3734 Value *Mul = Builder->CreateExtractValue(Call, 0, "umul.value");
3735 for (User *U : MulVal->users()) {
3736 if (U == &I || U == OtherVal)
3738 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
3739 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
3740 IC.replaceInstUsesWith(*TI, Mul);
3742 TI->setOperand(0, Mul);
3743 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
3744 assert(BO->getOpcode() == Instruction::And);
3745 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
3746 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
3747 APInt ShortMask = CI->getValue().trunc(MulWidth);
3748 Value *ShortAnd = Builder->CreateAnd(Mul, ShortMask);
3750 cast<Instruction>(Builder->CreateZExt(ShortAnd, BO->getType()));
3751 IC.Worklist.Add(Zext);
3752 IC.replaceInstUsesWith(*BO, Zext);
3754 llvm_unreachable("Unexpected Binary operation");
3756 IC.Worklist.Add(cast<Instruction>(U));
3759 if (isa<Instruction>(OtherVal))
3760 IC.Worklist.Add(cast<Instruction>(OtherVal));
3762 // The original icmp gets replaced with the overflow value, maybe inverted
3763 // depending on predicate.
3764 bool Inverse = false;
3765 switch (I.getPredicate()) {
3766 case ICmpInst::ICMP_NE:
3768 case ICmpInst::ICMP_EQ:
3771 case ICmpInst::ICMP_UGT:
3772 case ICmpInst::ICMP_UGE:
3773 if (I.getOperand(0) == MulVal)
3777 case ICmpInst::ICMP_ULT:
3778 case ICmpInst::ICMP_ULE:
3779 if (I.getOperand(1) == MulVal)
3784 llvm_unreachable("Unexpected predicate");
3787 Value *Res = Builder->CreateExtractValue(Call, 1);
3788 return BinaryOperator::CreateNot(Res);
3791 return ExtractValueInst::Create(Call, 1);
3794 /// When performing a comparison against a constant, it is possible that not all
3795 /// the bits in the LHS are demanded. This helper method computes the mask that
3797 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth,
3800 return APInt::getSignBit(BitWidth);
3802 ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1));
3803 if (!CI) return APInt::getAllOnesValue(BitWidth);
3804 const APInt &RHS = CI->getValue();
3806 switch (I.getPredicate()) {
3807 // For a UGT comparison, we don't care about any bits that
3808 // correspond to the trailing ones of the comparand. The value of these
3809 // bits doesn't impact the outcome of the comparison, because any value
3810 // greater than the RHS must differ in a bit higher than these due to carry.
3811 case ICmpInst::ICMP_UGT: {
3812 unsigned trailingOnes = RHS.countTrailingOnes();
3813 return APInt::getBitsSetFrom(BitWidth, trailingOnes);
3816 // Similarly, for a ULT comparison, we don't care about the trailing zeros.
3817 // Any value less than the RHS must differ in a higher bit because of carries.
3818 case ICmpInst::ICMP_ULT: {
3819 unsigned trailingZeros = RHS.countTrailingZeros();
3820 return APInt::getBitsSetFrom(BitWidth, trailingZeros);
3824 return APInt::getAllOnesValue(BitWidth);
3828 /// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst
3829 /// should be swapped.
3830 /// The decision is based on how many times these two operands are reused
3831 /// as subtract operands and their positions in those instructions.
3832 /// The rational is that several architectures use the same instruction for
3833 /// both subtract and cmp, thus it is better if the order of those operands
3835 /// \return true if Op0 and Op1 should be swapped.
3836 static bool swapMayExposeCSEOpportunities(const Value * Op0,
3837 const Value * Op1) {
3838 // Filter out pointer value as those cannot appears directly in subtract.
3839 // FIXME: we may want to go through inttoptrs or bitcasts.
3840 if (Op0->getType()->isPointerTy())
3842 // Count every uses of both Op0 and Op1 in a subtract.
3843 // Each time Op0 is the first operand, count -1: swapping is bad, the
3844 // subtract has already the same layout as the compare.
3845 // Each time Op0 is the second operand, count +1: swapping is good, the
3846 // subtract has a different layout as the compare.
3847 // At the end, if the benefit is greater than 0, Op0 should come second to
3848 // expose more CSE opportunities.
3849 int GlobalSwapBenefits = 0;
3850 for (const User *U : Op0->users()) {
3851 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(U);
3852 if (!BinOp || BinOp->getOpcode() != Instruction::Sub)
3854 // If Op0 is the first argument, this is not beneficial to swap the
3856 int LocalSwapBenefits = -1;
3857 unsigned Op1Idx = 1;
3858 if (BinOp->getOperand(Op1Idx) == Op0) {
3860 LocalSwapBenefits = 1;
3862 if (BinOp->getOperand(Op1Idx) != Op1)
3864 GlobalSwapBenefits += LocalSwapBenefits;
3866 return GlobalSwapBenefits > 0;
3869 /// \brief Check that one use is in the same block as the definition and all
3870 /// other uses are in blocks dominated by a given block.
3872 /// \param DI Definition
3874 /// \param DB Block that must dominate all uses of \p DI outside
3875 /// the parent block
3876 /// \return true when \p UI is the only use of \p DI in the parent block
3877 /// and all other uses of \p DI are in blocks dominated by \p DB.
3879 bool InstCombiner::dominatesAllUses(const Instruction *DI,
3880 const Instruction *UI,
3881 const BasicBlock *DB) const {
3882 assert(DI && UI && "Instruction not defined\n");
3883 // Ignore incomplete definitions.
3884 if (!DI->getParent())
3886 // DI and UI must be in the same block.
3887 if (DI->getParent() != UI->getParent())
3889 // Protect from self-referencing blocks.
3890 if (DI->getParent() == DB)
3892 for (const User *U : DI->users()) {
3893 auto *Usr = cast<Instruction>(U);
3894 if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
3900 /// Return true when the instruction sequence within a block is select-cmp-br.
3901 static bool isChainSelectCmpBranch(const SelectInst *SI) {
3902 const BasicBlock *BB = SI->getParent();
3905 auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
3906 if (!BI || BI->getNumSuccessors() != 2)
3908 auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
3909 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
3914 /// \brief True when a select result is replaced by one of its operands
3915 /// in select-icmp sequence. This will eventually result in the elimination
3918 /// \param SI Select instruction
3919 /// \param Icmp Compare instruction
3920 /// \param SIOpd Operand that replaces the select
3923 /// - The replacement is global and requires dominator information
3924 /// - The caller is responsible for the actual replacement
3929 /// %4 = select i1 %3, %C* %0, %C* null
3930 /// %5 = icmp eq %C* %4, null
3931 /// br i1 %5, label %9, label %7
3933 /// ; <label>:7 ; preds = %entry
3934 /// %8 = getelementptr inbounds %C* %4, i64 0, i32 0
3937 /// can be transformed to
3939 /// %5 = icmp eq %C* %0, null
3940 /// %6 = select i1 %3, i1 %5, i1 true
3941 /// br i1 %6, label %9, label %7
3943 /// ; <label>:7 ; preds = %entry
3944 /// %8 = getelementptr inbounds %C* %0, i64 0, i32 0 // replace by %0!
3946 /// Similar when the first operand of the select is a constant or/and
3947 /// the compare is for not equal rather than equal.
3949 /// NOTE: The function is only called when the select and compare constants
3950 /// are equal, the optimization can work only for EQ predicates. This is not a
3951 /// major restriction since a NE compare should be 'normalized' to an equal
3952 /// compare, which usually happens in the combiner and test case
3953 /// select-cmp-br.ll checks for it.
3954 bool InstCombiner::replacedSelectWithOperand(SelectInst *SI,
3955 const ICmpInst *Icmp,
3956 const unsigned SIOpd) {
3957 assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
3958 if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
3959 BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
3960 // The check for the single predecessor is not the best that can be
3961 // done. But it protects efficiently against cases like when SI's
3962 // home block has two successors, Succ and Succ1, and Succ1 predecessor
3963 // of Succ. Then SI can't be replaced by SIOpd because the use that gets
3964 // replaced can be reached on either path. So the uniqueness check
3965 // guarantees that the path all uses of SI (outside SI's parent) are on
3966 // is disjoint from all other paths out of SI. But that information
3967 // is more expensive to compute, and the trade-off here is in favor
3968 // of compile-time. It should also be noticed that we check for a single
3969 // predecessor and not only uniqueness. This to handle the situation when
3970 // Succ and Succ1 points to the same basic block.
3971 if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
3973 SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
3980 /// Try to fold the comparison based on range information we can get by checking
3981 /// whether bits are known to be zero or one in the inputs.
3982 Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
3983 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3984 Type *Ty = Op0->getType();
3985 ICmpInst::Predicate Pred = I.getPredicate();
3987 // Get scalar or pointer size.
3988 unsigned BitWidth = Ty->isIntOrIntVectorTy()
3989 ? Ty->getScalarSizeInBits()
3990 : DL.getTypeSizeInBits(Ty->getScalarType());
3995 // If this is a normal comparison, it demands all bits. If it is a sign bit
3996 // comparison, it only demands the sign bit.
3997 bool IsSignBit = false;
3999 if (match(Op1, m_APInt(CmpC))) {
4001 IsSignBit = isSignBitCheck(Pred, *CmpC, UnusedBit);
4004 APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0);
4005 APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);
4007 if (SimplifyDemandedBits(&I, 0,
4008 getDemandedBitsLHSMask(I, BitWidth, IsSignBit),
4009 Op0KnownZero, Op0KnownOne, 0))
4012 if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth),
4013 Op1KnownZero, Op1KnownOne, 0))
4016 // Given the known and unknown bits, compute a range that the LHS could be
4017 // in. Compute the Min, Max and RHS values based on the known bits. For the
4018 // EQ and NE we use unsigned values.
4019 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
4020 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
4022 computeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne, Op0Min,
4024 computeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne, Op1Min,
4027 computeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne, Op0Min,
4029 computeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne, Op1Min,
4033 // If Min and Max are known to be the same, then SimplifyDemandedBits
4034 // figured out that the LHS is a constant. Constant fold this now, so that
4035 // code below can assume that Min != Max.
4036 if (!isa<Constant>(Op0) && Op0Min == Op0Max)
4037 return new ICmpInst(Pred, ConstantInt::get(Op0->getType(), Op0Min), Op1);
4038 if (!isa<Constant>(Op1) && Op1Min == Op1Max)
4039 return new ICmpInst(Pred, Op0, ConstantInt::get(Op1->getType(), Op1Min));
4041 // Based on the range information we know about the LHS, see if we can
4042 // simplify this comparison. For example, (x&4) < 8 is always true.
4045 llvm_unreachable("Unknown icmp opcode!");
4046 case ICmpInst::ICMP_EQ:
4047 case ICmpInst::ICMP_NE: {
4048 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) {
4049 return Pred == CmpInst::ICMP_EQ
4050 ? replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()))
4051 : replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4054 // If all bits are known zero except for one, then we know at most one bit
4055 // is set. If the comparison is against zero, then this is a check to see if
4056 // *that* bit is set.
4057 APInt Op0KnownZeroInverted = ~Op0KnownZero;
4058 if (~Op1KnownZero == 0) {
4059 // If the LHS is an AND with the same constant, look through it.
4060 Value *LHS = nullptr;
4062 if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
4063 *LHSC != Op0KnownZeroInverted)
4067 if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
4068 APInt ValToCheck = Op0KnownZeroInverted;
4069 Type *XTy = X->getType();
4070 if (ValToCheck.isPowerOf2()) {
4071 // ((1 << X) & 8) == 0 -> X != 3
4072 // ((1 << X) & 8) != 0 -> X == 3
4073 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
4074 auto NewPred = ICmpInst::getInversePredicate(Pred);
4075 return new ICmpInst(NewPred, X, CmpC);
4076 } else if ((++ValToCheck).isPowerOf2()) {
4077 // ((1 << X) & 7) == 0 -> X >= 3
4078 // ((1 << X) & 7) != 0 -> X < 3
4079 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
4081 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
4082 return new ICmpInst(NewPred, X, CmpC);
4086 // Check if the LHS is 8 >>u x and the result is a power of 2 like 1.
4088 if (Op0KnownZeroInverted == 1 &&
4089 match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
4090 // ((8 >>u X) & 1) == 0 -> X != 3
4091 // ((8 >>u X) & 1) != 0 -> X == 3
4092 unsigned CmpVal = CI->countTrailingZeros();
4093 auto NewPred = ICmpInst::getInversePredicate(Pred);
4094 return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
4099 case ICmpInst::ICMP_ULT: {
4100 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
4101 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4102 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
4103 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4104 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
4105 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4108 if (match(Op1, m_APInt(CmpC))) {
4109 // A <u C -> A == C-1 if min(A)+1 == C
4110 if (Op1Max == Op0Min + 1) {
4111 Constant *CMinus1 = ConstantInt::get(Op0->getType(), *CmpC - 1);
4112 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, CMinus1);
4117 case ICmpInst::ICMP_UGT: {
4118 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
4119 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4121 if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= max(B)
4122 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4124 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
4125 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4128 if (match(Op1, m_APInt(CmpC))) {
4129 // A >u C -> A == C+1 if max(a)-1 == C
4130 if (*CmpC == Op0Max - 1)
4131 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4132 ConstantInt::get(Op1->getType(), *CmpC + 1));
4136 case ICmpInst::ICMP_SLT:
4137 if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(C)
4138 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4139 if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(C)
4140 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4141 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
4142 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4143 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
4144 if (Op1Max == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
4145 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4146 Builder->getInt(CI->getValue() - 1));
4149 case ICmpInst::ICMP_SGT:
4150 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
4151 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4152 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
4153 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4155 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
4156 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4157 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
4158 if (Op1Min == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
4159 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4160 Builder->getInt(CI->getValue() + 1));
4163 case ICmpInst::ICMP_SGE:
4164 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
4165 if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
4166 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4167 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
4168 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4170 case ICmpInst::ICMP_SLE:
4171 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
4172 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
4173 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4174 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
4175 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4177 case ICmpInst::ICMP_UGE:
4178 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
4179 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
4180 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4181 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
4182 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4184 case ICmpInst::ICMP_ULE:
4185 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
4186 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
4187 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4188 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
4189 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4193 // Turn a signed comparison into an unsigned one if both operands are known to
4194 // have the same sign.
4196 ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) ||
4197 (Op0KnownOne.isNegative() && Op1KnownOne.isNegative())))
4198 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
4203 /// If we have an icmp le or icmp ge instruction with a constant operand, turn
4204 /// it into the appropriate icmp lt or icmp gt instruction. This transform
4205 /// allows them to be folded in visitICmpInst.
4206 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
4207 ICmpInst::Predicate Pred = I.getPredicate();
4208 if (Pred != ICmpInst::ICMP_SLE && Pred != ICmpInst::ICMP_SGE &&
4209 Pred != ICmpInst::ICMP_ULE && Pred != ICmpInst::ICMP_UGE)
4212 Value *Op0 = I.getOperand(0);
4213 Value *Op1 = I.getOperand(1);
4214 auto *Op1C = dyn_cast<Constant>(Op1);
4218 // Check if the constant operand can be safely incremented/decremented without
4219 // overflowing/underflowing. For scalars, SimplifyICmpInst has already handled
4220 // the edge cases for us, so we just assert on them. For vectors, we must
4221 // handle the edge cases.
4222 Type *Op1Type = Op1->getType();
4223 bool IsSigned = I.isSigned();
4224 bool IsLE = (Pred == ICmpInst::ICMP_SLE || Pred == ICmpInst::ICMP_ULE);
4225 auto *CI = dyn_cast<ConstantInt>(Op1C);
4227 // A <= MAX -> TRUE ; A >= MIN -> TRUE
4228 assert(IsLE ? !CI->isMaxValue(IsSigned) : !CI->isMinValue(IsSigned));
4229 } else if (Op1Type->isVectorTy()) {
4230 // TODO? If the edge cases for vectors were guaranteed to be handled as they
4231 // are for scalar, we could remove the min/max checks. However, to do that,
4232 // we would have to use insertelement/shufflevector to replace edge values.
4233 unsigned NumElts = Op1Type->getVectorNumElements();
4234 for (unsigned i = 0; i != NumElts; ++i) {
4235 Constant *Elt = Op1C->getAggregateElement(i);
4239 if (isa<UndefValue>(Elt))
4242 // Bail out if we can't determine if this constant is min/max or if we
4243 // know that this constant is min/max.
4244 auto *CI = dyn_cast<ConstantInt>(Elt);
4245 if (!CI || (IsLE ? CI->isMaxValue(IsSigned) : CI->isMinValue(IsSigned)))
4253 // Increment or decrement the constant and set the new comparison predicate:
4254 // ULE -> ULT ; UGE -> UGT ; SLE -> SLT ; SGE -> SGT
4255 Constant *OneOrNegOne = ConstantInt::get(Op1Type, IsLE ? 1 : -1, true);
4256 CmpInst::Predicate NewPred = IsLE ? ICmpInst::ICMP_ULT: ICmpInst::ICMP_UGT;
4257 NewPred = IsSigned ? ICmpInst::getSignedPredicate(NewPred) : NewPred;
4258 return new ICmpInst(NewPred, Op0, ConstantExpr::getAdd(Op1C, OneOrNegOne));
4261 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
4262 bool Changed = false;
4263 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4264 unsigned Op0Cplxity = getComplexity(Op0);
4265 unsigned Op1Cplxity = getComplexity(Op1);
4267 /// Orders the operands of the compare so that they are listed from most
4268 /// complex to least complex. This puts constants before unary operators,
4269 /// before binary operators.
4270 if (Op0Cplxity < Op1Cplxity ||
4271 (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) {
4273 std::swap(Op0, Op1);
4278 SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL, &TLI, &DT, &AC, &I))
4279 return replaceInstUsesWith(I, V);
4281 // comparing -val or val with non-zero is the same as just comparing val
4282 // ie, abs(val) != 0 -> val != 0
4283 if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
4284 Value *Cond, *SelectTrue, *SelectFalse;
4285 if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
4286 m_Value(SelectFalse)))) {
4287 if (Value *V = dyn_castNegVal(SelectTrue)) {
4288 if (V == SelectFalse)
4289 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
4291 else if (Value *V = dyn_castNegVal(SelectFalse)) {
4292 if (V == SelectTrue)
4293 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
4298 Type *Ty = Op0->getType();
4300 // icmp's with boolean values can always be turned into bitwise operations
4301 if (Ty->getScalarType()->isIntegerTy(1)) {
4302 switch (I.getPredicate()) {
4303 default: llvm_unreachable("Invalid icmp instruction!");
4304 case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B)
4305 Value *Xor = Builder->CreateXor(Op0, Op1, I.getName() + "tmp");
4306 return BinaryOperator::CreateNot(Xor);
4308 case ICmpInst::ICMP_NE: // icmp ne i1 A, B -> A^B
4309 return BinaryOperator::CreateXor(Op0, Op1);
4311 case ICmpInst::ICMP_UGT:
4312 std::swap(Op0, Op1); // Change icmp ugt -> icmp ult
4314 case ICmpInst::ICMP_ULT:{ // icmp ult i1 A, B -> ~A & B
4315 Value *Not = Builder->CreateNot(Op0, I.getName() + "tmp");
4316 return BinaryOperator::CreateAnd(Not, Op1);
4318 case ICmpInst::ICMP_SGT:
4319 std::swap(Op0, Op1); // Change icmp sgt -> icmp slt
4321 case ICmpInst::ICMP_SLT: { // icmp slt i1 A, B -> A & ~B
4322 Value *Not = Builder->CreateNot(Op1, I.getName() + "tmp");
4323 return BinaryOperator::CreateAnd(Not, Op0);
4325 case ICmpInst::ICMP_UGE:
4326 std::swap(Op0, Op1); // Change icmp uge -> icmp ule
4328 case ICmpInst::ICMP_ULE: { // icmp ule i1 A, B -> ~A | B
4329 Value *Not = Builder->CreateNot(Op0, I.getName() + "tmp");
4330 return BinaryOperator::CreateOr(Not, Op1);
4332 case ICmpInst::ICMP_SGE:
4333 std::swap(Op0, Op1); // Change icmp sge -> icmp sle
4335 case ICmpInst::ICMP_SLE: { // icmp sle i1 A, B -> A | ~B
4336 Value *Not = Builder->CreateNot(Op1, I.getName() + "tmp");
4337 return BinaryOperator::CreateOr(Not, Op0);
4342 if (ICmpInst *NewICmp = canonicalizeCmpWithConstant(I))
4345 if (Instruction *Res = foldICmpWithConstant(I))
4348 if (Instruction *Res = foldICmpUsingKnownBits(I))
4351 // Test if the ICmpInst instruction is used exclusively by a select as
4352 // part of a minimum or maximum operation. If so, refrain from doing
4353 // any other folding. This helps out other analyses which understand
4354 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
4355 // and CodeGen. And in this case, at least one of the comparison
4356 // operands has at least one user besides the compare (the select),
4357 // which would often largely negate the benefit of folding anyway.
4359 if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
4360 if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
4361 (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
4364 // FIXME: We only do this after checking for min/max to prevent infinite
4365 // looping caused by a reverse canonicalization of these patterns for min/max.
4366 // FIXME: The organization of folds is a mess. These would naturally go into
4367 // canonicalizeCmpWithConstant(), but we can't move all of the above folds
4368 // down here after the min/max restriction.
4369 ICmpInst::Predicate Pred = I.getPredicate();
4371 if (match(Op1, m_APInt(C))) {
4372 // For i32: x >u 2147483647 -> x <s 0 -> true if sign bit set
4373 if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
4374 Constant *Zero = Constant::getNullValue(Op0->getType());
4375 return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
4378 // For i32: x <u 2147483648 -> x >s -1 -> true if sign bit clear
4379 if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
4380 Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
4381 return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
4385 if (Instruction *Res = foldICmpInstWithConstant(I))
4388 if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
4391 // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
4392 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
4393 if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
4395 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
4396 if (Instruction *NI = foldGEPICmp(GEP, Op0,
4397 ICmpInst::getSwappedPredicate(I.getPredicate()), I))
4400 // Try to optimize equality comparisons against alloca-based pointers.
4401 if (Op0->getType()->isPointerTy() && I.isEquality()) {
4402 assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
4403 if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op0, DL)))
4404 if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
4406 if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op1, DL)))
4407 if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
4411 // Test to see if the operands of the icmp are casted versions of other
4412 // values. If the ptr->ptr cast can be stripped off both arguments, we do so
4414 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
4415 if (Op0->getType()->isPointerTy() &&
4416 (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
4417 // We keep moving the cast from the left operand over to the right
4418 // operand, where it can often be eliminated completely.
4419 Op0 = CI->getOperand(0);
4421 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
4422 // so eliminate it as well.
4423 if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
4424 Op1 = CI2->getOperand(0);
4426 // If Op1 is a constant, we can fold the cast into the constant.
4427 if (Op0->getType() != Op1->getType()) {
4428 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
4429 Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
4431 // Otherwise, cast the RHS right before the icmp
4432 Op1 = Builder->CreateBitCast(Op1, Op0->getType());
4435 return new ICmpInst(I.getPredicate(), Op0, Op1);
4439 if (isa<CastInst>(Op0)) {
4440 // Handle the special case of: icmp (cast bool to X), <cst>
4441 // This comes up when you have code like
4444 // For generality, we handle any zero-extension of any operand comparison
4445 // with a constant or another cast from the same type.
4446 if (isa<Constant>(Op1) || isa<CastInst>(Op1))
4447 if (Instruction *R = foldICmpWithCastAndCast(I))
4451 if (Instruction *Res = foldICmpBinOp(I))
4454 if (Instruction *Res = foldICmpWithMinMax(I))
4459 // Transform (A & ~B) == 0 --> (A & B) != 0
4460 // and (A & ~B) != 0 --> (A & B) == 0
4461 // if A is a power of 2.
4462 if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
4463 match(Op1, m_Zero()) &&
4464 isKnownToBeAPowerOfTwo(A, DL, false, 0, &AC, &I, &DT) && I.isEquality())
4465 return new ICmpInst(I.getInversePredicate(),
4466 Builder->CreateAnd(A, B),
4469 // ~x < ~y --> y < x
4470 // ~x < cst --> ~cst < x
4471 if (match(Op0, m_Not(m_Value(A)))) {
4472 if (match(Op1, m_Not(m_Value(B))))
4473 return new ICmpInst(I.getPredicate(), B, A);
4474 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1))
4475 return new ICmpInst(I.getPredicate(), ConstantExpr::getNot(RHSC), A);
4478 Instruction *AddI = nullptr;
4479 if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
4480 m_Instruction(AddI))) &&
4481 isa<IntegerType>(A->getType())) {
4484 if (OptimizeOverflowCheck(OCF_UNSIGNED_ADD, A, B, *AddI, Result,
4486 replaceInstUsesWith(*AddI, Result);
4487 return replaceInstUsesWith(I, Overflow);
4491 // (zext a) * (zext b) --> llvm.umul.with.overflow.
4492 if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
4493 if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
4496 if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
4497 if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
4502 if (Instruction *Res = foldICmpEquality(I))
4505 // The 'cmpxchg' instruction returns an aggregate containing the old value and
4506 // an i1 which indicates whether or not we successfully did the swap.
4508 // Replace comparisons between the old value and the expected value with the
4509 // indicator that 'cmpxchg' returns.
4511 // N.B. This transform is only valid when the 'cmpxchg' is not permitted to
4512 // spuriously fail. In those cases, the old value may equal the expected
4513 // value but it is possible for the swap to not occur.
4514 if (I.getPredicate() == ICmpInst::ICMP_EQ)
4515 if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
4516 if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
4517 if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
4519 return ExtractValueInst::Create(ACXI, 1);
4522 Value *X; ConstantInt *Cst;
4524 if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
4525 return foldICmpAddOpConst(I, X, Cst, I.getPredicate());
4528 if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
4529 return foldICmpAddOpConst(I, X, Cst, I.getSwappedPredicate());
4531 return Changed ? &I : nullptr;
4534 /// Fold fcmp ([us]itofp x, cst) if possible.
4535 Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
4537 if (!isa<ConstantFP>(RHSC)) return nullptr;
4538 const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
4540 // Get the width of the mantissa. We don't want to hack on conversions that
4541 // might lose information from the integer, e.g. "i64 -> float"
4542 int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
4543 if (MantissaWidth == -1) return nullptr; // Unknown.
4545 IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
4547 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
4549 if (I.isEquality()) {
4550 FCmpInst::Predicate P = I.getPredicate();
4551 bool IsExact = false;
4552 APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
4553 RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
4555 // If the floating point constant isn't an integer value, we know if we will
4556 // ever compare equal / not equal to it.
4558 // TODO: Can never be -0.0 and other non-representable values
4559 APFloat RHSRoundInt(RHS);
4560 RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
4561 if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
4562 if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
4563 return replaceInstUsesWith(I, Builder->getFalse());
4565 assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
4566 return replaceInstUsesWith(I, Builder->getTrue());
4570 // TODO: If the constant is exactly representable, is it always OK to do
4571 // equality compares as integer?
4574 // Check to see that the input is converted from an integer type that is small
4575 // enough that preserves all bits. TODO: check here for "known" sign bits.
4576 // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
4577 unsigned InputSize = IntTy->getScalarSizeInBits();
4579 // Following test does NOT adjust InputSize downwards for signed inputs,
4580 // because the most negative value still requires all the mantissa bits
4581 // to distinguish it from one less than that value.
4582 if ((int)InputSize > MantissaWidth) {
4583 // Conversion would lose accuracy. Check if loss can impact comparison.
4584 int Exp = ilogb(RHS);
4585 if (Exp == APFloat::IEK_Inf) {
4586 int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
4587 if (MaxExponent < (int)InputSize - !LHSUnsigned)
4588 // Conversion could create infinity.
4591 // Note that if RHS is zero or NaN, then Exp is negative
4592 // and first condition is trivially false.
4593 if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
4594 // Conversion could affect comparison.
4599 // Otherwise, we can potentially simplify the comparison. We know that it
4600 // will always come through as an integer value and we know the constant is
4601 // not a NAN (it would have been previously simplified).
4602 assert(!RHS.isNaN() && "NaN comparison not already folded!");
4604 ICmpInst::Predicate Pred;
4605 switch (I.getPredicate()) {
4606 default: llvm_unreachable("Unexpected predicate!");
4607 case FCmpInst::FCMP_UEQ:
4608 case FCmpInst::FCMP_OEQ:
4609 Pred = ICmpInst::ICMP_EQ;
4611 case FCmpInst::FCMP_UGT:
4612 case FCmpInst::FCMP_OGT:
4613 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
4615 case FCmpInst::FCMP_UGE:
4616 case FCmpInst::FCMP_OGE:
4617 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
4619 case FCmpInst::FCMP_ULT:
4620 case FCmpInst::FCMP_OLT:
4621 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
4623 case FCmpInst::FCMP_ULE:
4624 case FCmpInst::FCMP_OLE:
4625 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
4627 case FCmpInst::FCMP_UNE:
4628 case FCmpInst::FCMP_ONE:
4629 Pred = ICmpInst::ICMP_NE;
4631 case FCmpInst::FCMP_ORD:
4632 return replaceInstUsesWith(I, Builder->getTrue());
4633 case FCmpInst::FCMP_UNO:
4634 return replaceInstUsesWith(I, Builder->getFalse());
4637 // Now we know that the APFloat is a normal number, zero or inf.
4639 // See if the FP constant is too large for the integer. For example,
4640 // comparing an i8 to 300.0.
4641 unsigned IntWidth = IntTy->getScalarSizeInBits();
4644 // If the RHS value is > SignedMax, fold the comparison. This handles +INF
4645 // and large values.
4646 APFloat SMax(RHS.getSemantics());
4647 SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
4648 APFloat::rmNearestTiesToEven);
4649 if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
4650 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
4651 Pred == ICmpInst::ICMP_SLE)
4652 return replaceInstUsesWith(I, Builder->getTrue());
4653 return replaceInstUsesWith(I, Builder->getFalse());
4656 // If the RHS value is > UnsignedMax, fold the comparison. This handles
4657 // +INF and large values.
4658 APFloat UMax(RHS.getSemantics());
4659 UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
4660 APFloat::rmNearestTiesToEven);
4661 if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
4662 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
4663 Pred == ICmpInst::ICMP_ULE)
4664 return replaceInstUsesWith(I, Builder->getTrue());
4665 return replaceInstUsesWith(I, Builder->getFalse());
4670 // See if the RHS value is < SignedMin.
4671 APFloat SMin(RHS.getSemantics());
4672 SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
4673 APFloat::rmNearestTiesToEven);
4674 if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
4675 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
4676 Pred == ICmpInst::ICMP_SGE)
4677 return replaceInstUsesWith(I, Builder->getTrue());
4678 return replaceInstUsesWith(I, Builder->getFalse());
4681 // See if the RHS value is < UnsignedMin.
4682 APFloat SMin(RHS.getSemantics());
4683 SMin.convertFromAPInt(APInt::getMinValue(IntWidth), true,
4684 APFloat::rmNearestTiesToEven);
4685 if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
4686 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
4687 Pred == ICmpInst::ICMP_UGE)
4688 return replaceInstUsesWith(I, Builder->getTrue());
4689 return replaceInstUsesWith(I, Builder->getFalse());
4693 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
4694 // [0, UMAX], but it may still be fractional. See if it is fractional by
4695 // casting the FP value to the integer value and back, checking for equality.
4696 // Don't do this for zero, because -0.0 is not fractional.
4697 Constant *RHSInt = LHSUnsigned
4698 ? ConstantExpr::getFPToUI(RHSC, IntTy)
4699 : ConstantExpr::getFPToSI(RHSC, IntTy);
4700 if (!RHS.isZero()) {
4701 bool Equal = LHSUnsigned
4702 ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
4703 : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
4705 // If we had a comparison against a fractional value, we have to adjust
4706 // the compare predicate and sometimes the value. RHSC is rounded towards
4707 // zero at this point.
4709 default: llvm_unreachable("Unexpected integer comparison!");
4710 case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
4711 return replaceInstUsesWith(I, Builder->getTrue());
4712 case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
4713 return replaceInstUsesWith(I, Builder->getFalse());
4714 case ICmpInst::ICMP_ULE:
4715 // (float)int <= 4.4 --> int <= 4
4716 // (float)int <= -4.4 --> false
4717 if (RHS.isNegative())
4718 return replaceInstUsesWith(I, Builder->getFalse());
4720 case ICmpInst::ICMP_SLE:
4721 // (float)int <= 4.4 --> int <= 4
4722 // (float)int <= -4.4 --> int < -4
4723 if (RHS.isNegative())
4724 Pred = ICmpInst::ICMP_SLT;
4726 case ICmpInst::ICMP_ULT:
4727 // (float)int < -4.4 --> false
4728 // (float)int < 4.4 --> int <= 4
4729 if (RHS.isNegative())
4730 return replaceInstUsesWith(I, Builder->getFalse());
4731 Pred = ICmpInst::ICMP_ULE;
4733 case ICmpInst::ICMP_SLT:
4734 // (float)int < -4.4 --> int < -4
4735 // (float)int < 4.4 --> int <= 4
4736 if (!RHS.isNegative())
4737 Pred = ICmpInst::ICMP_SLE;
4739 case ICmpInst::ICMP_UGT:
4740 // (float)int > 4.4 --> int > 4
4741 // (float)int > -4.4 --> true
4742 if (RHS.isNegative())
4743 return replaceInstUsesWith(I, Builder->getTrue());
4745 case ICmpInst::ICMP_SGT:
4746 // (float)int > 4.4 --> int > 4
4747 // (float)int > -4.4 --> int >= -4
4748 if (RHS.isNegative())
4749 Pred = ICmpInst::ICMP_SGE;
4751 case ICmpInst::ICMP_UGE:
4752 // (float)int >= -4.4 --> true
4753 // (float)int >= 4.4 --> int > 4
4754 if (RHS.isNegative())
4755 return replaceInstUsesWith(I, Builder->getTrue());
4756 Pred = ICmpInst::ICMP_UGT;
4758 case ICmpInst::ICMP_SGE:
4759 // (float)int >= -4.4 --> int >= -4
4760 // (float)int >= 4.4 --> int > 4
4761 if (!RHS.isNegative())
4762 Pred = ICmpInst::ICMP_SGT;
4768 // Lower this FP comparison into an appropriate integer version of the
4770 return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
4773 Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
4774 bool Changed = false;
4776 /// Orders the operands of the compare so that they are listed from most
4777 /// complex to least complex. This puts constants before unary operators,
4778 /// before binary operators.
4779 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
4784 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4786 if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1,
4787 I.getFastMathFlags(), DL, &TLI, &DT, &AC, &I))
4788 return replaceInstUsesWith(I, V);
4790 // Simplify 'fcmp pred X, X'
4792 switch (I.getPredicate()) {
4793 default: llvm_unreachable("Unknown predicate!");
4794 case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
4795 case FCmpInst::FCMP_ULT: // True if unordered or less than
4796 case FCmpInst::FCMP_UGT: // True if unordered or greater than
4797 case FCmpInst::FCMP_UNE: // True if unordered or not equal
4798 // Canonicalize these to be 'fcmp uno %X, 0.0'.
4799 I.setPredicate(FCmpInst::FCMP_UNO);
4800 I.setOperand(1, Constant::getNullValue(Op0->getType()));
4803 case FCmpInst::FCMP_ORD: // True if ordered (no nans)
4804 case FCmpInst::FCMP_OEQ: // True if ordered and equal
4805 case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
4806 case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
4807 // Canonicalize these to be 'fcmp ord %X, 0.0'.
4808 I.setPredicate(FCmpInst::FCMP_ORD);
4809 I.setOperand(1, Constant::getNullValue(Op0->getType()));
4814 // Test if the FCmpInst instruction is used exclusively by a select as
4815 // part of a minimum or maximum operation. If so, refrain from doing
4816 // any other folding. This helps out other analyses which understand
4817 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
4818 // and CodeGen. And in this case, at least one of the comparison
4819 // operands has at least one user besides the compare (the select),
4820 // which would often largely negate the benefit of folding anyway.
4822 if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
4823 if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
4824 (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
4827 // Handle fcmp with constant RHS
4828 if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
4829 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
4830 switch (LHSI->getOpcode()) {
4831 case Instruction::FPExt: {
4832 // fcmp (fpext x), C -> fcmp x, (fptrunc C) if fptrunc is lossless
4833 FPExtInst *LHSExt = cast<FPExtInst>(LHSI);
4834 ConstantFP *RHSF = dyn_cast<ConstantFP>(RHSC);
4838 const fltSemantics *Sem;
4839 // FIXME: This shouldn't be here.
4840 if (LHSExt->getSrcTy()->isHalfTy())
4841 Sem = &APFloat::IEEEhalf();
4842 else if (LHSExt->getSrcTy()->isFloatTy())
4843 Sem = &APFloat::IEEEsingle();
4844 else if (LHSExt->getSrcTy()->isDoubleTy())
4845 Sem = &APFloat::IEEEdouble();
4846 else if (LHSExt->getSrcTy()->isFP128Ty())
4847 Sem = &APFloat::IEEEquad();
4848 else if (LHSExt->getSrcTy()->isX86_FP80Ty())
4849 Sem = &APFloat::x87DoubleExtended();
4850 else if (LHSExt->getSrcTy()->isPPC_FP128Ty())
4851 Sem = &APFloat::PPCDoubleDouble();
4856 APFloat F = RHSF->getValueAPF();
4857 F.convert(*Sem, APFloat::rmNearestTiesToEven, &Lossy);
4859 // Avoid lossy conversions and denormals. Zero is a special case
4860 // that's OK to convert.
4864 ((Fabs.compare(APFloat::getSmallestNormalized(*Sem)) !=
4865 APFloat::cmpLessThan) || Fabs.isZero()))
4867 return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
4868 ConstantFP::get(RHSC->getContext(), F));
4871 case Instruction::PHI:
4872 // Only fold fcmp into the PHI if the phi and fcmp are in the same
4873 // block. If in the same block, we're encouraging jump threading. If
4874 // not, we are just pessimizing the code by making an i1 phi.
4875 if (LHSI->getParent() == I.getParent())
4876 if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
4879 case Instruction::SIToFP:
4880 case Instruction::UIToFP:
4881 if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
4884 case Instruction::FSub: {
4885 // fcmp pred (fneg x), C -> fcmp swap(pred) x, -C
4887 if (match(LHSI, m_FNeg(m_Value(Op))))
4888 return new FCmpInst(I.getSwappedPredicate(), Op,
4889 ConstantExpr::getFNeg(RHSC));
4892 case Instruction::Load:
4893 if (GetElementPtrInst *GEP =
4894 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
4895 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
4896 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
4897 !cast<LoadInst>(LHSI)->isVolatile())
4898 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
4902 case Instruction::Call: {
4903 if (!RHSC->isNullValue())
4906 CallInst *CI = cast<CallInst>(LHSI);
4907 Intrinsic::ID IID = getIntrinsicForCallSite(CI, &TLI);
4908 if (IID != Intrinsic::fabs)
4911 // Various optimization for fabs compared with zero.
4912 switch (I.getPredicate()) {
4915 // fabs(x) < 0 --> false
4916 case FCmpInst::FCMP_OLT:
4917 llvm_unreachable("handled by SimplifyFCmpInst");
4918 // fabs(x) > 0 --> x != 0
4919 case FCmpInst::FCMP_OGT:
4920 return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0), RHSC);
4921 // fabs(x) <= 0 --> x == 0
4922 case FCmpInst::FCMP_OLE:
4923 return new FCmpInst(FCmpInst::FCMP_OEQ, CI->getArgOperand(0), RHSC);
4924 // fabs(x) >= 0 --> !isnan(x)
4925 case FCmpInst::FCMP_OGE:
4926 return new FCmpInst(FCmpInst::FCMP_ORD, CI->getArgOperand(0), RHSC);
4927 // fabs(x) == 0 --> x == 0
4928 // fabs(x) != 0 --> x != 0
4929 case FCmpInst::FCMP_OEQ:
4930 case FCmpInst::FCMP_UEQ:
4931 case FCmpInst::FCMP_ONE:
4932 case FCmpInst::FCMP_UNE:
4933 return new FCmpInst(I.getPredicate(), CI->getArgOperand(0), RHSC);
4939 // fcmp pred (fneg x), (fneg y) -> fcmp swap(pred) x, y
4941 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
4942 return new FCmpInst(I.getSwappedPredicate(), X, Y);
4944 // fcmp (fpext x), (fpext y) -> fcmp x, y
4945 if (FPExtInst *LHSExt = dyn_cast<FPExtInst>(Op0))
4946 if (FPExtInst *RHSExt = dyn_cast<FPExtInst>(Op1))
4947 if (LHSExt->getSrcTy() == RHSExt->getSrcTy())
4948 return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
4949 RHSExt->getOperand(0));
4951 return Changed ? &I : nullptr;