//===- InstCombineCompares.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitICmp and visitFCmp functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

// How many times is a select replaced by one of its operands?
STATISTIC(NumSel, "Number of select opts");

static ConstantInt *extractElement(Constant *V, Constant *Idx) {
  return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
}

static bool hasAddOverflow(ConstantInt *Result,
                           ConstantInt *In1, ConstantInt *In2,
                           bool IsSigned) {
  if (!IsSigned)
    return Result->getValue().ult(In1->getValue());

  if (In2->isNegative())
    return Result->getValue().sgt(In1->getValue());
  return Result->getValue().slt(In1->getValue());
}

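// For example, with i8 operands, an unsigned add of 200 + 100 wraps to 44;
// since 44 u< 200, the "Result u< In1" test above reports the overflow.
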
/// Compute Result = In1+In2, returning true if the result overflowed for this
/// type.
static bool addWithOverflow(Constant *&Result, Constant *In1,
                            Constant *In2, bool IsSigned = false) {
  Result = ConstantExpr::getAdd(In1, In2);

  if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
      Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
      if (hasAddOverflow(extractElement(Result, Idx),
                         extractElement(In1, Idx),
                         extractElement(In2, Idx),
                         IsSigned))
        return true;
    }
    return false;
  }

  return hasAddOverflow(cast<ConstantInt>(Result),
                        cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                        IsSigned);
}

static bool hasSubOverflow(ConstantInt *Result,
                           ConstantInt *In1, ConstantInt *In2,
                           bool IsSigned) {
  if (!IsSigned)
    return Result->getValue().ugt(In1->getValue());

  if (In2->isNegative())
    return Result->getValue().slt(In1->getValue());

  return Result->getValue().sgt(In1->getValue());
}

/// Compute Result = In1-In2, returning true if the result overflowed for this
/// type.
static bool subWithOverflow(Constant *&Result, Constant *In1,
                            Constant *In2, bool IsSigned = false) {
  Result = ConstantExpr::getSub(In1, In2);

  if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
      Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
      if (hasSubOverflow(extractElement(Result, Idx),
                         extractElement(In1, Idx),
                         extractElement(In2, Idx),
                         IsSigned))
        return true;
    }
    return false;
  }

  return hasSubOverflow(cast<ConstantInt>(Result),
                        cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                        IsSigned);
}

/// Given an icmp instruction, return true if any use of this comparison is a
/// branch on sign bit comparison.
static bool hasBranchUse(ICmpInst &I) {
  for (auto *U : I.users())
    if (isa<BranchInst>(U))
      return true;
  return false;
}

/// Given an exploded icmp instruction, return true if the comparison only
/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if the
/// result of the comparison is true when the input value is signed.
static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                           bool &TrueIfSigned) {
  switch (Pred) {
  case ICmpInst::ICMP_SLT: // True if LHS s< 0
    TrueIfSigned = true;
    return RHS.isNullValue();
  case ICmpInst::ICMP_SLE: // True if LHS s<= RHS and RHS == -1
    TrueIfSigned = true;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_SGT: // True if LHS s> -1
    TrueIfSigned = false;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_UGT:
    // True if LHS u> RHS and RHS == high-bit-mask - 1
    TrueIfSigned = true;
    return RHS.isMaxSignedValue();
  case ICmpInst::ICMP_UGE:
    // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
    TrueIfSigned = true;
    return RHS.isSignMask();
  default:
    return false;
  }
}

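// Illustrative i8 instances of the cases above: (x s< 0), (x s<= -1) and
// (x s> -1) test the sign bit directly, while (x u> 127) and (x u>= 128)
// test it through the unsigned ordering, since 128 == 0x80 is the sign mask.
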
/// Returns true if the exploded icmp can be expressed as a signed comparison
/// to zero and updates the predicate accordingly.
/// The signedness of the comparison is preserved.
/// TODO: Refactor with decomposeBitTestICmp()?
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
  if (!ICmpInst::isSigned(Pred))
    return false;

  if (C.isNullValue())
    return ICmpInst::isRelational(Pred);

  if (C.isOneValue()) {
    if (Pred == ICmpInst::ICMP_SLT) {
      Pred = ICmpInst::ICMP_SLE;
      return true;
    }
  } else if (C.isAllOnesValue()) {
    if (Pred == ICmpInst::ICMP_SGT) {
      Pred = ICmpInst::ICMP_SGE;
      return true;
    }
  }

  return false;
}

/// Given a signed integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeSignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                   APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  // bit if it is unknown.
  Min = Known.One;
  Max = Known.One|UnknownBits;

  if (UnknownBits.isNegative()) { // Sign bit is unknown
    Min.setSignBit();
    Max.clearSignBit();
  }
}

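// Worked example: for an i4 value with Known.One = 0b0001 and
// Known.Zero = 0b0100, the unknown bits are 0b1010. The sign bit is among
// them, so Min = 0b1001 (-7) and Max = 0b0011 (3).
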
/// Given an unsigned integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeUnsignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                     APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when the unknown bits are all zeros.
  Min = Known.One;
  // The maximum value is when the unknown bits are all ones.
  Max = Known.One|UnknownBits;
}

/// This is called when we see this pattern:
///   cmp pred (load (gep GV, ...)), cmpcst
/// where GV is a global variable with a constant initializer. Try to simplify
/// this into some simple computation that does not need the load. For example
/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
///
/// If AndCst is non-null, then the loaded value is masked with that constant
/// before doing the comparison. This handles cases like "A[i]&4 == 0".
Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                                        GlobalVariable *GV,
                                                        CmpInst &ICI,
                                                        ConstantInt *AndCst) {
  Constant *Init = GV->getInitializer();
  if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
    return nullptr;

  uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
  // Don't blow up on huge arrays.
  if (ArrayElementCount > MaxArraySizeForCombine)
    return nullptr;

  // There are many forms of this optimization we can handle, for now, just do
  // the simple index into a single-dimensional array.
  //
  // Require: GEP GV, 0, i {{, constant indices}}
  if (GEP->getNumOperands() < 3 ||
      !isa<ConstantInt>(GEP->getOperand(1)) ||
      !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
      isa<Constant>(GEP->getOperand(2)))
    return nullptr;

  // Check that indices after the variable are constants and in-range for the
  // type they index. Collect the indices. This is typically for arrays of
  // structs.
  SmallVector<unsigned, 4> LaterIndices;

  Type *EltTy = Init->getType()->getArrayElementType();
  for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
    ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Idx) return nullptr; // Variable index.

    uint64_t IdxVal = Idx->getZExtValue();
    if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.

    if (StructType *STy = dyn_cast<StructType>(EltTy))
      EltTy = STy->getElementType(IdxVal);
    else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
      if (IdxVal >= ATy->getNumElements()) return nullptr;
      EltTy = ATy->getElementType();
    } else {
      return nullptr; // Unknown type.
    }

    LaterIndices.push_back(IdxVal);
  }

  enum { Overdefined = -3, Undefined = -2 };

  // Variables for our state machines.

  // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
  // "i == 47 | i == 87", where 47 is the first index the condition is true for,
  // and 87 is the second (and last) index. FirstTrueElement is -2 when
  // undefined, otherwise set to the first true element. SecondTrueElement is
  // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
  int FirstTrueElement = Undefined, SecondTrueElement = Undefined;

  // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
  // form "i != 47 & i != 87". Same state transitions as for true elements.
  int FirstFalseElement = Undefined, SecondFalseElement = Undefined;

  /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
  /// define a state machine that triggers for ranges of values that the index
  /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
  /// This is -2 when undefined, -3 when overdefined, and otherwise the last
  /// index in the range (inclusive). We use -2 for undefined here because we
  /// use relative comparisons and don't want 0-1 to match -1.
  int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;

  // MagicBitvector - This is a magic bitvector where we set a bit if the
  // comparison is true for element 'i'. If there are 64 elements or less in
  // the array, this will fully represent all the comparison results.
  uint64_t MagicBitvector = 0;

  // Scan the array and see if one of our patterns matches.
  Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
  for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
    Constant *Elt = Init->getAggregateElement(i);
    if (!Elt) return nullptr;

    // If this is indexing an array of structures, get the structure element.
    if (!LaterIndices.empty())
      Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);

    // If the element is masked, handle it.
    if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);

    // Find out if the comparison would be true or false for the i'th element.
    Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
                                                  CompareRHS, DL, &TLI);
    // If the result is undef for this element, ignore it.
    if (isa<UndefValue>(C)) {
      // Extend range state machines to cover this element in case there is an
      // undef in the middle of the range.
      if (TrueRangeEnd == (int)i-1)
        TrueRangeEnd = i;
      if (FalseRangeEnd == (int)i-1)
        FalseRangeEnd = i;
      continue;
    }

    // If we can't compute the result for any of the elements, we have to give
    // up evaluating the entire conditional.
    if (!isa<ConstantInt>(C)) return nullptr;

    // Otherwise, we know if the comparison is true or false for this element,
    // update our state machines.
    bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();

    // State machine for single/double/range index comparison.
    if (IsTrueForElt) {
      // Update the TrueElement state machine.
      if (FirstTrueElement == Undefined)
        FirstTrueElement = TrueRangeEnd = i; // First true element.
      else {
        // Update double-compare state machine.
        if (SecondTrueElement == Undefined)
          SecondTrueElement = i;
        else
          SecondTrueElement = Overdefined;

        // Update range state machine.
        if (TrueRangeEnd == (int)i-1)
          TrueRangeEnd = i;
        else
          TrueRangeEnd = Overdefined;
      }
    } else {
      // Update the FalseElement state machine.
      if (FirstFalseElement == Undefined)
        FirstFalseElement = FalseRangeEnd = i; // First false element.
      else {
        // Update double-compare state machine.
        if (SecondFalseElement == Undefined)
          SecondFalseElement = i;
        else
          SecondFalseElement = Overdefined;

        // Update range state machine.
        if (FalseRangeEnd == (int)i-1)
          FalseRangeEnd = i;
        else
          FalseRangeEnd = Overdefined;
      }
    }

    // If this element is in range, update our magic bitvector.
    if (i < 64 && IsTrueForElt)
      MagicBitvector |= 1ULL << i;

    // If all of our states become overdefined, bail out early. Since the
    // predicate is expensive, only check it every 8 elements. This is only
    // really useful for really huge arrays.
    if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
        SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
        FalseRangeEnd == Overdefined)
      return nullptr;
  }

  // Now that we've scanned the entire array, emit our new comparison(s). We
  // order the state machines in complexity of the generated code.
  Value *Idx = GEP->getOperand(2);

  // If the index is larger than the pointer size of the target, truncate the
  // index down like the GEP would do implicitly. We don't have to do this for
  // an inbounds GEP because the index can't be out of range.
  if (!GEP->isInBounds()) {
    Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
    unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
    if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
      Idx = Builder.CreateTrunc(Idx, IntPtrTy);
  }

  // If the comparison is only true for one or two elements, emit direct
  // comparisons.
  if (SecondTrueElement != Overdefined) {
    // None true -> false.
    if (FirstTrueElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getFalse());

    Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);

    // True for one element -> 'i == 47'.
    if (SecondTrueElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);

    // True for two elements -> 'i == 47 | i == 72'.
    Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
    Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
    Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
    return BinaryOperator::CreateOr(C1, C2);
  }

  // If the comparison is only false for one or two elements, emit direct
  // comparisons.
  if (SecondFalseElement != Overdefined) {
    // None false -> true.
    if (FirstFalseElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getTrue());

    Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);

    // False for one element -> 'i != 47'.
    if (SecondFalseElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);

    // False for two elements -> 'i != 47 & i != 72'.
    Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
    Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
    Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
    return BinaryOperator::CreateAnd(C1, C2);
  }

  // If the comparison can be replaced with a range comparison for the elements
  // where it is true, emit the range check.
  if (TrueRangeEnd != Overdefined) {
    assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");

    // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
    if (FirstTrueElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  TrueRangeEnd-FirstTrueElement+1);
    return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
  }

  // False range check.
  if (FalseRangeEnd != Overdefined) {
    assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
    // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
    if (FirstFalseElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  FalseRangeEnd-FirstFalseElement);
    return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
  }

  // If a magic bitvector captures the entire comparison state
  // of this load, replace it with computation that does:
  //   ((magic_cst >> i) & 1) != 0
  {
    Type *Ty = nullptr;

    // Look for an appropriate type:
    // - The type of Idx if the magic fits
    // - The smallest fitting legal type if we have a DataLayout
    // - Default to i32
    if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
      Ty = Idx->getType();
    else
      Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);

    if (Ty) {
      Value *V = Builder.CreateIntCast(Idx, Ty, false);
      V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
      V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
      return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
    }
  }

  return nullptr;
}

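// Worked example of the magic bitvector: for "abbbbc"[i] == 'b' the
// comparison is true for elements 1..4, so MagicBitvector == 0b011110.
// The emitted code is ((0b011110 >> i) & 1) != 0, which (like the range
// form above) needs no memory access at all.
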
/// Return a value that can be used to compare the *offset* implied by a GEP to
/// zero. For example, if we have &A[i], we want to return 'i' for
/// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
/// are involved. The above expression would also be legal to codegen as
/// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This latter form is less amenable to optimization though, and we are allowed
/// to generate the first by knowing that pointer arithmetic doesn't overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
                                          const DataLayout &DL) {
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index. If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index. For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return nullptr;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element. For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices. If so, emit the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return nullptr;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index. If there is no offset, life is simple, return
  // the variable.
  Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
  unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
  if (Offset == 0) {
    // Cast to intptrty in case a truncation occurs. If an extension is needed,
    // we don't need to bother extending: the extension won't affect where the
    // computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
      VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
    }
    return VariableIdx;
  }

  // Otherwise, there is an index. The computation we will do will be modulo
  // the pointer size, so get it.
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  Offset &= PtrSizeMask;
  VariableScale &= PtrSizeMask;

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return nullptr;

  // Okay, we can do this evaluation. Start by converting the index to intptr.
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy,
                                           true /*Signed*/);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset");
}

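// E.g. for the documented "12 + i*4" case, the emitted value is "i + 3":
// both expressions cross zero at exactly i == -3, and the narrower form is
// easier for later folds to reason about.
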
/// Returns true if we can rewrite Start as a GEP with pointer Base
/// and some integer offset. The nodes that need to be re-written
/// for this transformation will be added to Explored.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
                                  const DataLayout &DL,
                                  SetVector<Value *> &Explored) {
  SmallVector<Value *, 16> WorkList(1, Start);
  Explored.insert(Base);

  // The following traversal gives us an order which can be used
  // when doing the final transformation. Since in the final
  // transformation we create the PHI replacement instructions first,
  // we don't have to get them in any particular order.
  //
  // However, for other instructions we will have to traverse the
  // operands of an instruction first, which means that we have to
  // do a post-order traversal.
  while (!WorkList.empty()) {
    SetVector<PHINode *> PHIs;

    while (!WorkList.empty()) {
      if (Explored.size() >= 100)
        return false;

      Value *V = WorkList.back();

      if (Explored.count(V) != 0) {
        WorkList.pop_back();
        continue;
      }

      if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
          !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
        // We've found some value that we can't explore which is different from
        // the base. Therefore we can't do this transformation.
        return false;

      if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
        auto *CI = dyn_cast<CastInst>(V);
        if (!CI->isNoopCast(DL))
          return false;

        if (Explored.count(CI->getOperand(0)) == 0)
          WorkList.push_back(CI->getOperand(0));
      }

      if (auto *GEP = dyn_cast<GEPOperator>(V)) {
        // We're limiting the GEP to having one index. This will preserve
        // the original pointer type. We could handle more cases in the
        // future.
        if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
            GEP->getType() != Start->getType())
          return false;

        if (Explored.count(GEP->getOperand(0)) == 0)
          WorkList.push_back(GEP->getOperand(0));
      }

      if (WorkList.back() == V) {
        WorkList.pop_back();
        // We've finished visiting this node, mark it as such.
        Explored.insert(V);
      }

      if (auto *PN = dyn_cast<PHINode>(V)) {
        // We cannot transform PHIs on unsplittable basic blocks.
        if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
          return false;
        Explored.insert(PN);
        PHIs.insert(PN);
      }
    }

    // Explore the PHI nodes further.
    for (auto *PN : PHIs)
      for (Value *Op : PN->incoming_values())
        if (Explored.count(Op) == 0)
          WorkList.push_back(Op);
  }

  // Make sure that we can do this. Since we can't insert GEPs in a basic
  // block before a PHI node, we can't easily do this transformation if
  // we have PHI node users of transformed instructions.
  for (Value *Val : Explored) {
    for (Value *Use : Val->uses()) {

      auto *PHI = dyn_cast<PHINode>(Use);
      auto *Inst = dyn_cast<Instruction>(Val);

      if (Inst == Base || Inst == PHI || !Inst || !PHI ||
          Explored.count(PHI) == 0)
        continue;

      if (PHI->getParent() == Inst->getParent())
        return false;
    }
  }
  return true;
}

// Sets the appropriate insert point on Builder where we can add
// a replacement Instruction for V (if that is possible).
static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
                              bool Before = true) {
  if (auto *PHI = dyn_cast<PHINode>(V)) {
    Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
    return;
  }
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (!Before)
      I = &*std::next(I->getIterator());
    Builder.SetInsertPoint(I);
    return;
  }
  if (auto *A = dyn_cast<Argument>(V)) {
    // Set the insertion point in the entry block.
    BasicBlock &Entry = A->getParent()->getEntryBlock();
    Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
    return;
  }
  // Otherwise, this is a constant and we don't need to set a new
  // insertion point.
  assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
}

/// Returns a re-written value of Start as an indexed GEP using Base as a
/// pointer.
static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
                                 const DataLayout &DL,
                                 SetVector<Value *> &Explored) {
  // Perform all the substitutions. This is a bit tricky because we can
  // have cycles in our use-def chains.
  // 1. Create the PHI nodes without any incoming values.
  // 2. Create all the other values.
  // 3. Add the edges for the PHI nodes.
  // 4. Emit GEPs to get the original pointers.
  // 5. Remove the original instructions.
  Type *IndexType = IntegerType::get(
      Base->getContext(), DL.getPointerTypeSizeInBits(Start->getType()));

  DenseMap<Value *, Value *> NewInsts;
  NewInsts[Base] = ConstantInt::getNullValue(IndexType);

  // Create the new PHI nodes, without adding any incoming values.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // Create empty phi nodes. This avoids cyclic dependencies when creating
    // the remaining instructions.
    if (auto *PHI = dyn_cast<PHINode>(Val))
      NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
                                      PHI->getName() + ".idx", PHI);
  }
  IRBuilder<> Builder(Base->getContext());

  // Create all the other instructions.
  for (Value *Val : Explored) {

    if (NewInsts.find(Val) != NewInsts.end())
      continue;

    if (auto *CI = dyn_cast<CastInst>(Val)) {
      NewInsts[CI] = NewInsts[CI->getOperand(0)];
      continue;
    }
    if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
      Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
                                                  : GEP->getOperand(1);
      setInsertionPoint(Builder, GEP);
      // Indices might need to be sign extended. GEPs will magically do
      // this, but we need to do it ourselves here.
      if (Index->getType()->getScalarSizeInBits() !=
          NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
        Index = Builder.CreateSExtOrTrunc(
            Index, NewInsts[GEP->getOperand(0)]->getType(),
            GEP->getOperand(0)->getName() + ".sext");
      }

      auto *Op = NewInsts[GEP->getOperand(0)];
      if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
        NewInsts[GEP] = Index;
      else
        NewInsts[GEP] = Builder.CreateNSWAdd(
            Op, Index, GEP->getOperand(0)->getName() + ".add");
      continue;
    }
    if (isa<PHINode>(Val))
      continue;

    llvm_unreachable("Unexpected instruction type");
  }

  // Add the incoming values to the PHI nodes.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // All the instructions have been created, we can now add edges to the
    // phi nodes.
    if (auto *PHI = dyn_cast<PHINode>(Val)) {
      PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
      for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
        Value *NewIncoming = PHI->getIncomingValue(I);

        if (NewInsts.find(NewIncoming) != NewInsts.end())
          NewIncoming = NewInsts[NewIncoming];

        NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
      }
    }
  }

  for (Value *Val : Explored) {
    if (Val == Base)
      continue;

    // Depending on the type, for external users we have to emit
    // a GEP or a GEP + ptrtoint.
    setInsertionPoint(Builder, Val, false);

    // If required, create an inttoptr instruction for Base.
    Value *NewBase = Base;
    if (!Base->getType()->isPointerTy())
      NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
                                               Start->getName() + "to.ptr");

    Value *GEP = Builder.CreateInBoundsGEP(
        Start->getType()->getPointerElementType(), NewBase,
        makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");

    if (!Val->getType()->isPointerTy()) {
      Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
                                              Val->getName() + ".conv");
      Val->replaceAllUsesWith(Cast);
    } else {
      Val->replaceAllUsesWith(GEP);
    }
  }

  return NewInsts[Start];
}

/// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
/// the input Value as a constant indexed GEP. Returns a pair containing
/// the GEPs Pointer and Index.
static std::pair<Value *, Value *>
getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
  Type *IndexType = IntegerType::get(V->getContext(),
                                     DL.getPointerTypeSizeInBits(V->getType()));

  Constant *Index = ConstantInt::getNullValue(IndexType);
  while (true) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      // We accept only inbounds GEPs here to exclude the possibility of
      // overflow.
      if (!GEP->isInBounds())
        break;
      if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
          GEP->getType() == V->getType()) {
        V = GEP->getOperand(0);
        Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
        Index = ConstantExpr::getAdd(
            Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType));
        continue;
      }
      break;
    }
    if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    break;
  }
  return {V, Index};
}

/// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
/// We can look through PHIs, GEPs and casts in order to determine a common base
/// between GEPLHS and RHS.
static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
                                              ICmpInst::Predicate Cond,
                                              const DataLayout &DL) {
  if (!GEPLHS->hasAllConstantIndices())
    return nullptr;

  // Make sure the pointers have the same type.
  if (GEPLHS->getType() != RHS->getType())
    return nullptr;

  Value *PtrBase, *Index;
  std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);

  // The set of nodes that will take part in this transformation.
  SetVector<Value *> Nodes;

  if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
    return nullptr;

  // We know we can re-write this as
  //   ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)
  // Since we've only looked through inbounds GEPs we know that we
  // can't have overflow on either side. We can therefore re-write
  // this as:
  //   OFFSET1 cmp OFFSET2
  Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);

  // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
  // GEP having PtrBase as the pointer base, and has returned in NewRHS the
  // offset. Since Index is the offset of LHS to the base pointer, we will now
  // compare the offsets instead of comparing the pointers.
  return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
}

/// Fold comparisons between a GEP instruction and something else. At this point
/// we know that the GEP is on the LHS of the comparison.
Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                       ICmpInst::Predicate Cond,
                                       Instruction &I) {
  // Don't transform signed compares of GEPs into index compares. Even if the
  // GEP is inbounds, the final add of the base pointer can have signed overflow
  // and would change the result of the icmp.
  // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
  // the maximum signed value for the pointer type.
  if (ICmpInst::isSigned(Cond))
    return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);

  // Look through bitcasts and addrspacecasts. We do not however want to remove
  // 0 GEPs.
  if (!isa<GetElementPtrInst>(RHS))
    RHS = RHS->stripPointerCasts();

  Value *PtrBase = GEPLHS->getOperand(0);
  if (PtrBase == RHS && GEPLHS->isInBounds()) {
    // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow since the gep is inbounds. See if we can
    // output an optimized form.
    Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);

    // If not, synthesize the offset the hard way.
    if (!Offset)
      Offset = EmitGEPOffset(GEPLHS);
    return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      if (IndicesTheSame)
        return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // If we're comparing GEPs with two base pointers that only differ in type
      // and both GEPs have only constant indices or just one use, then fold
      // the compare with the adjusted indices.
      if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
          (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
          (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
          PtrBase->stripPointerCasts() ==
              GEPRHS->getOperand(0)->stripPointerCasts()) {
        Value *LOffset = EmitGEPOffset(GEPLHS);
        Value *ROffset = EmitGEPOffset(GEPRHS);

        // If we looked through an addrspacecast between different sized address
        // spaces, the LHS and RHS pointers are different sized
        // integers. Truncate to the smaller one.
        Type *LHSIndexTy = LOffset->getType();
        Type *RHSIndexTy = ROffset->getType();
        if (LHSIndexTy != RHSIndexTy) {
          if (LHSIndexTy->getPrimitiveSizeInBits() <
              RHSIndexTy->getPrimitiveSizeInBits()) {
            ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
          } else
            LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
        }

        Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
                                        LOffset, ROffset);
        return replaceInstUsesWith(I, Cmp);
      }

      // Otherwise, the base pointers are different and the indices are
      // different. Try convert this to an indexed compare by looking through
      // PHIs/casts.
      return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
    }

    // If one of the GEPs has all zero indices, recurse.
    if (GEPLHS->hasAllZeroIndices())
      return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                         ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    if (GEPRHS->hasAllZeroIndices())
      return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0; // Keep track of # differences.
      unsigned DiffOperand = 0;    // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
              GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          }

          if (NumDifferences++) break;
          DiffOperand = i;
        }

      if (NumDifferences == 0) // SAME GEP?
        return replaceInstUsesWith(I, // No comparison is needed here.
          Builder.getInt1(ICmpInst::isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1 && GEPsInBounds) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS);
      Value *R = EmitGEPOffset(GEPRHS);
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }

  // Try convert this to an indexed compare by looking through PHIs/casts as a
  // last resort.
  return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
}

Instruction *InstCombiner::foldAllocaCmp(ICmpInst &ICI,
                                         const AllocaInst *Alloca,
                                         const Value *Other) {
  assert(ICI.isEquality() && "Cannot fold non-equality comparison.");

  // It would be tempting to fold away comparisons between allocas and any
  // pointer not based on that alloca (e.g. an argument). However, even
  // though such pointers cannot alias, they can still compare equal.
  //
  // But LLVM doesn't specify where allocas get their memory, so if the alloca
  // doesn't escape we can argue that it's impossible to guess its value, and we
  // can therefore act as if any such guesses are wrong.
  //
  // The code below checks that the alloca doesn't escape, and that it's only
  // used in a comparison once (the current instruction). The
  // single-comparison-use condition ensures that we're trivially folding all
  // comparisons against the alloca consistently, and avoids the risk of
  // erroneously folding a comparison of the pointer with itself.

  unsigned MaxIter = 32; // Break cycles and bound to constant-time.

  SmallVector<const Use *, 32> Worklist;
  for (const Use &U : Alloca->uses()) {
    if (Worklist.size() >= MaxIter)
      return nullptr;
    Worklist.push_back(&U);
  }

  unsigned NumCmps = 0;
  while (!Worklist.empty()) {
    assert(Worklist.size() <= MaxIter);
    const Use *U = Worklist.pop_back_val();
    const Value *V = U->getUser();
    --MaxIter;

    if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
        isa<SelectInst>(V)) {
      // Track the uses.
    } else if (isa<LoadInst>(V)) {
      // Loading from the pointer doesn't escape it.
      continue;
    } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
      // Storing *to* the pointer is fine, but storing the pointer escapes it.
      if (SI->getValueOperand() == U->get())
        return nullptr;
      continue;
    } else if (isa<ICmpInst>(V)) {
      if (NumCmps++)
        return nullptr; // Found more than one cmp.
      continue;
    } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
      switch (Intrin->getIntrinsicID()) {
        // These intrinsics don't escape or compare the pointer. Memset is safe
        // because we don't allow ptrtoint. Memcpy and memmove are safe because
        // we don't allow stores, so src cannot point to V.
        case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
        case Intrinsic::dbg_declare: case Intrinsic::dbg_value:
        case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
          continue;
        default:
          return nullptr;
      }
    } else {
      return nullptr;
    }
    for (const Use &U : V->uses()) {
      if (Worklist.size() >= MaxIter)
        return nullptr;
      Worklist.push_back(&U);
    }
  }

  Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
  return replaceInstUsesWith(
      ICI,
      ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
}

/// Fold "icmp pred (X+CI), X".
Instruction *InstCombiner::foldICmpAddOpConst(Instruction &ICI,
                                              Value *X, ConstantInt *CI,
                                              ICmpInst::Predicate Pred) {
  // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
  // so the values can never be equal. Similarly for all other "or equals"
  // cases.

  // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
  // (X+2) <u X        --> X >u (MAXUINT-2)        --> X > 253
  // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
    Value *R =
        ConstantExpr::getSub(ConstantInt::getAllOnesValue(CI->getType()), CI);
    return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
  }

  // (X+1) >u X        --> X <u (0-1)        --> X != 255
  // (X+2) >u X        --> X <u (0-2)        --> X <u 254
  // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
    return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));

  unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
  ConstantInt *SMax = ConstantInt::get(X->getContext(),
                                       APInt::getSignedMaxValue(BitWidth));

  // (X+ 1) <s X       --> X >s (MAXSINT-1)        --> X == 127
  // (X+ 2) <s X       --> X >s (MAXSINT-2)        --> X >s 125
  // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)  --> X >s 0
  // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)  --> X >s -1
  // (X+ -2) <s X      --> X >s (MAXSINT- -2)      --> X >s 126
  // (X+ -1) <s X      --> X >s (MAXSINT- -1)      --> X != 127
  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
    return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI));

  // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
  // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
  // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
  // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
  // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
  // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128

  assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
  Constant *C = Builder.getInt(CI->getValue() - 1);
  return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
}

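// Concretely, for i8: "(X+1) <u X" can only hold when X+1 wraps, i.e. when
// X == 255, and the emitted "X >u 254" is exactly that test without the add.
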
/// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
/// (icmp eq/ne A, Log2(AP2/AP1)) ->
/// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
Instruction *InstCombiner::foldICmpShrConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  bool IsAShr = isa<AShrOperator>(I.getOperand(0));
  if (IsAShr) {
    if (AP2.isAllOnesValue())
      return nullptr;
    if (AP2.isNegative() != AP1.isNegative())
      return nullptr;
    if (AP2.sgt(AP1))
      return nullptr;
  }

  if (!AP1)
    // 'A' must be large enough to shift out the highest set bit.
    return getICmp(I.ICMP_UGT, A,
                   ConstantInt::get(A->getType(), AP2.logBase2()));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  int Shift;
  if (IsAShr && AP1.isNegative())
    Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
  else
    Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();

  if (Shift > 0) {
    if (IsAShr && AP1 == AP2.ashr(Shift)) {
      // There are multiple solutions if we are comparing against -1 and the LHS
      // of the ashr is not a power of two.
      if (AP1.isAllOnesValue() && !AP2.isPowerOf2())
        return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    } else if (AP1 == AP2.lshr(Shift)) {
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    }
  }

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

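// Example of the fold above: "(lshr i32 16, %a) == 4" becomes "%a == 2",
// since 16 has two more leading zeros than 4 and 16 >> 2 == 4.
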
/// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
/// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Instruction *InstCombiner::foldICmpShlConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  unsigned AP2TrailingZeros = AP2.countTrailingZeros();

  if (!AP1 && AP2TrailingZeros != 0)
    return getICmp(
        I.ICMP_UGE, A,
        ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  // Get the distance between the lowest bits that are set.
  int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;

  if (Shift > 0 && AP2.shl(Shift) == AP1)
    return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

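// Example: "(shl i32 3, %a) == 12" becomes "%a == 2", because 12 has two
// more trailing zeros than 3 and 3 << 2 == 12.
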
/// The caller has matched a pattern of the form:
///   I = icmp ugt (add (add A, B), CI2), CI1
/// If this is of the form:
///   sum = a + b
///   if (sum+128 >u 255)
/// Then replace it with llvm.sadd.with.overflow.i8.
///
static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
                                          ConstantInt *CI2, ConstantInt *CI1,
                                          InstCombiner &IC) {
  // The transformation we're trying to do here is to transform this into an
  // llvm.sadd.with.overflow. To do this, we have to replace the original add
  // with a narrower add, and discard the add-with-constant that is part of the
  // range check (if we can't eliminate it, this isn't profitable).

  // In order to eliminate the add-with-constant, the compare can be its only
  // use.
  Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
  if (!AddWithCst->hasOneUse())
    return nullptr;

  // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
  if (!CI2->getValue().isPowerOf2())
    return nullptr;
  unsigned NewWidth = CI2->getValue().countTrailingZeros();
  if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
    return nullptr;

  // The width of the new add formed is 1 more than the bias.
  ++NewWidth;

  // Check to see that CI1 is an all-ones value with NewWidth bits.
  if (CI1->getBitWidth() == NewWidth ||
      CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
    return nullptr;

  // This is only really a signed overflow check if the inputs have been
  // sign-extended; check for that condition. For example, if CI2 is 2^31 and
  // the operands of the add are 64 bits wide, we need at least 33 sign bits.
  unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
  if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
      IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
    return nullptr;

  // In order to replace the original add with a narrower
  // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
  // and truncates that discard the high bits of the add. Verify that this is
  // the case.
  Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
  for (User *U : OrigAdd->users()) {
    if (U == AddWithCst)
      continue;

    // Only accept truncates for now. We would really like a nice recursive
    // predicate like SimplifyDemandedBits, but which goes downwards the use-def
    // chain to see which bits of a value are actually demanded. If the
    // original add had another add which was then immediately truncated, we
    // could still do the transformation.
    TruncInst *TI = dyn_cast<TruncInst>(U);
    if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
      return nullptr;
  }

  // If the pattern matches, truncate the inputs to the narrower type and
  // use the sadd_with_overflow intrinsic to efficiently compute both the
  // result and the overflow bit.
  Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
  Value *F = Intrinsic::getDeclaration(I.getModule(),
                                       Intrinsic::sadd_with_overflow, NewType);

  InstCombiner::BuilderTy &Builder = IC.Builder;

  // Put the new code above the original add, in case there are any uses of the
  // add between the add and the compare.
  Builder.SetInsertPoint(OrigAdd);

  Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
  Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
  CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
  Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
  Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());

  // The inner add was the result of the narrow add, zero extended to the
  // wider type. Replace it with the result computed by the intrinsic.
  IC.replaceInstUsesWith(*OrigAdd, ZExt);

  // The original icmp gets replaced with the overflow value.
  return ExtractValueInst::Create(Call, 1, "sadd.overflow");
}

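// Illustrative IR for the i8 case (widened arithmetic done in i32):
//   %sum  = add i32 %a, %b            ; %a, %b are sign-extended i8 values
//   %off  = add i32 %sum, 128
//   %ovfl = icmp ugt i32 %off, 255
// becomes
//   %sadd = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %ta, i8 %tb)
//   %ovfl = extractvalue { i8, i1 } %sadd, 1
// where %ta/%tb are the truncated inputs.
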
// Fold icmp Pred X, C.
Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
  CmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Cmp.getOperand(0);
  const APInt *C;
  if (!match(Cmp.getOperand(1), m_APInt(C)))
    return nullptr;

  Value *A = nullptr, *B = nullptr;

  // Match the following pattern, which is a common idiom when writing
  // overflow-safe integer arithmetic functions. The source performs an addition
  // in wider type and explicitly checks for overflow using comparisons against
  // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
  //
  // TODO: This could probably be generalized to handle other overflow-safe
  // operations if we worked out the formulas to compute the appropriate magic
  // constants.
  //
  // sum = a + b
  // if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
  {
    ConstantInt *CI2; // I = icmp ugt (add (add A, B), CI2), CI
    if (Pred == ICmpInst::ICMP_UGT &&
        match(X, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
      if (Instruction *Res = processUGT_ADDCST_ADD(
              Cmp, A, B, CI2, cast<ConstantInt>(Cmp.getOperand(1)), *this))
        return Res;
  }

  // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0)
  if (C->isNullValue() && Pred == ICmpInst::ICMP_SGT) {
    SelectPatternResult SPR = matchSelectPattern(X, A, B);
    if (SPR.Flavor == SPF_SMIN) {
      if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT))
        return new ICmpInst(Pred, B, Cmp.getOperand(1));
      if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT))
        return new ICmpInst(Pred, A, Cmp.getOperand(1));
    }
  }

  // FIXME: Use m_APInt to allow folds for splat constants.
  ConstantInt *CI = dyn_cast<ConstantInt>(Cmp.getOperand(1));
  if (!CI)
    return nullptr;

  // Canonicalize icmp instructions based on dominating conditions.
  BasicBlock *Parent = Cmp.getParent();
  BasicBlock *Dom = Parent->getSinglePredecessor();
  auto *BI = Dom ? dyn_cast<BranchInst>(Dom->getTerminator()) : nullptr;
  ICmpInst::Predicate Pred2;
  BasicBlock *TrueBB, *FalseBB;
  ConstantInt *CI2;
  if (BI && match(BI, m_Br(m_ICmp(Pred2, m_Specific(X), m_ConstantInt(CI2)),
                           TrueBB, FalseBB)) &&
      TrueBB != FalseBB) {
    ConstantRange CR =
        ConstantRange::makeAllowedICmpRegion(Pred, CI->getValue());
    ConstantRange DominatingCR =
        (TrueBB == Parent)
            ? ConstantRange::makeExactICmpRegion(Pred2, CI2->getValue())
            : ConstantRange::makeExactICmpRegion(
                  CmpInst::getInversePredicate(Pred2), CI2->getValue());
    ConstantRange Intersection = DominatingCR.intersectWith(CR);
    ConstantRange Difference = DominatingCR.difference(CR);
    if (Intersection.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder.getFalse());
    if (Difference.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder.getTrue());

    // If this is a normal comparison, it demands all bits. If it is a sign
    // bit comparison, it only demands the sign bit.
    bool UnusedBit;
    bool IsSignBit = isSignBitCheck(Pred, CI->getValue(), UnusedBit);

    // Canonicalizing a sign bit comparison that gets used in a branch
    // pessimizes codegen by generating a branch-on-zero instruction instead
    // of a test-and-branch. So we avoid canonicalizing in such situations
    // because a test-and-branch instruction has better branch displacement
    // than a compare-and-branch instruction.
    if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
      return nullptr;

    if (auto *AI = Intersection.getSingleElement())
      return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*AI));
    if (auto *AD = Difference.getSingleElement())
      return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*AD));
  }

  return nullptr;
}

/// Fold icmp (trunc X, Y), C.
Instruction *InstCombiner::foldICmpTruncConstant(ICmpInst &Cmp,
                                                 Instruction *Trunc,
                                                 const APInt *C) {
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Trunc->getOperand(0);
  if (C->isOneValue() && C->getBitWidth() > 1) {
    // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
    Value *V = nullptr;
    if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
      return new ICmpInst(ICmpInst::ICMP_SLT, V,
                          ConstantInt::get(V->getType(), 1));
  }

  if (Cmp.isEquality() && Trunc->hasOneUse()) {
    // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
    // of the high bits truncated out of x are known.
    unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
             SrcBits = X->getType()->getScalarSizeInBits();
    KnownBits Known = computeKnownBits(X, 0, &Cmp);

    // If all the high bits are known, we can do this xform.
    if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
      // Pull in the high bits from known-ones set.
      APInt NewRHS = C->zext(SrcBits);
      NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
    }
  }

  return nullptr;
}

/// Fold icmp (xor X, Y), C.
Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp,
                                               BinaryOperator *Xor,
                                               const APInt *C) {
  Value *X = Xor->getOperand(0);
  Value *Y = Xor->getOperand(1);
  const APInt *XorC;
  if (!match(Y, m_APInt(XorC)))
    return nullptr;

  // If this is a comparison that tests the signbit (X < 0) or (x > -1),
  // fold the xor.
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  if ((Pred == ICmpInst::ICMP_SLT && C->isNullValue()) ||
      (Pred == ICmpInst::ICMP_SGT && C->isAllOnesValue())) {

    // If the sign bit of the XorCst is not set, there is no change to
    // the operation, just stop using the Xor.
    if (!XorC->isNegative()) {
      Cmp.setOperand(0, X);
      Worklist.Add(Xor);
      return &Cmp;
    }

    // Was the old condition true if the operand is positive?
    bool isTrueIfPositive = Pred == ICmpInst::ICMP_SGT;

    // If so, the new one isn't.
    isTrueIfPositive ^= true;

    Constant *CmpConstant = cast<Constant>(Cmp.getOperand(1));
    if (isTrueIfPositive)
      return new ICmpInst(ICmpInst::ICMP_SGT, X, SubOne(CmpConstant));
    else
      return new ICmpInst(ICmpInst::ICMP_SLT, X, AddOne(CmpConstant));
  }

  if (Xor->hasOneUse()) {
    // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
    if (!Cmp.isEquality() && XorC->isSignMask()) {
      Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
                            : Cmp.getSignedPredicate();
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), *C ^ *XorC));
    }

    // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
    if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
      Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
                            : Cmp.getSignedPredicate();
      Pred = Cmp.getSwappedPredicate(Pred);
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), *C ^ *XorC));
    }
  }

  // (icmp ugt (xor X, C), ~C) -> (icmp ult X, C)
  //   iff -C is a power of 2
  if (Pred == ICmpInst::ICMP_UGT && *XorC == ~(*C) && (*C + 1).isPowerOf2())
    return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);

  // (icmp ult (xor X, C), -C) -> (icmp uge X, C)
  //   iff -C is a power of 2
  if (Pred == ICmpInst::ICMP_ULT && *XorC == -(*C) && C->isPowerOf2())
    return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);

  return nullptr;
}

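// Example of the sign-mask fold: for i8, "(xor i8 %x, 128) u< 42" flips the
// sign bit, which maps unsigned ordering onto signed ordering, so it folds to
// "%x s< -86" (42 ^ 128 == 170, which is -86 as a signed i8).
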
/// Fold icmp (and (sh X, Y), C2), C1.
Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
                                            const APInt *C1, const APInt *C2) {
  BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
  if (!Shift || !Shift->isShift())
    return nullptr;

  // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
  // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
  // code produced by the clang front-end, for bitfield access.
  // This seemingly simple opportunity to fold away a shift turns out to be
  // rather complicated. See PR17827 for details.
  unsigned ShiftOpcode = Shift->getOpcode();
  bool IsShl = ShiftOpcode == Instruction::Shl;
  const APInt *C3;
  if (match(Shift->getOperand(1), m_APInt(C3))) {
    bool CanFold = false;
    if (ShiftOpcode == Instruction::AShr) {
      // There may be some constraints that make this possible, but nothing
      // simple has been discovered yet.
    } else if (ShiftOpcode == Instruction::Shl) {
      // For a left shift, we can fold if the comparison is not signed. We can
      // also fold a signed comparison if the mask value and comparison value
      // are not negative. These constraints may not be obvious, but we can
      // prove that they are correct using an SMT solver.
      if (!Cmp.isSigned() || (!C2->isNegative() && !C1->isNegative()))
        CanFold = true;
    } else if (ShiftOpcode == Instruction::LShr) {
      // For a logical right shift, we can fold if the comparison is not signed.
      // We can also fold a signed comparison if the shifted mask value and the
      // shifted comparison value are not negative. These constraints may not be
      // obvious, but we can prove that they are correct using an SMT solver.
      if (!Cmp.isSigned() ||
          (!C2->shl(*C3).isNegative() && !C1->shl(*C3).isNegative()))
        CanFold = true;
    }

    if (CanFold) {
      APInt NewCst = IsShl ? C1->lshr(*C3) : C1->shl(*C3);
      APInt SameAsC1 = IsShl ? NewCst.shl(*C3) : NewCst.lshr(*C3);
      // Check to see if we are shifting out any of the bits being compared.
      if (SameAsC1 != *C1) {
        // If we shifted bits out, the fold is not going to work out. As a
        // special case, check to see if this means that the result is always
        // true or false now.
        if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
          return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
        if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
          return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
      } else {
        Cmp.setOperand(1, ConstantInt::get(And->getType(), NewCst));
        APInt NewAndCst = IsShl ? C2->lshr(*C3) : C2->shl(*C3);
        And->setOperand(1, ConstantInt::get(And->getType(), NewAndCst));
        And->setOperand(0, Shift->getOperand(0));
        Worklist.Add(Shift); // Shift is dead.
        return &Cmp;
      }
    }
  }
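  // Worked example (illustrative): for the bitfield pattern named above,
  //   icmp eq (and (lshr i32 %x, 3), 15), 2
  // the fold produces
  //   icmp eq (and i32 %x, 120), 16
  // i.e. the mask and the comparison constant are both shifted left by 3 so
  // the shift itself can be deleted.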
  // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is
  // preferable because it allows the C2 << Y expression to be hoisted out of a
  // loop if Y is invariant and X is not.
  if (Shift->hasOneUse() && C1->isNullValue() && Cmp.isEquality() &&
      !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
    // Compute C2 << Y.
    Value *NewShift =
        IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
              : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));

    // Compute X & (C2 << Y).
    Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
    Cmp.setOperand(0, NewAnd);
    return &Cmp;
  }

  return nullptr;
}

/// Fold icmp (and X, C2), C1.
Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
                                                 BinaryOperator *And,
                                                 const APInt *C1) {
  const APInt *C2;
  if (!match(And->getOperand(1), m_APInt(C2)))
    return nullptr;

  if (!And->hasOneUse() || !And->getOperand(0)->hasOneUse())
    return nullptr;

  // If the LHS is an 'and' of a truncate and we can widen the and/compare to
  // the input width without changing the value produced, eliminate the cast:
  //
  // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
  //
  // We can do this transformation if the constants do not have their sign bits
  // set or if it is an equality comparison. Extending a relational comparison
  // when we're checking the sign bit would not work.
  Value *W;
  if (match(And->getOperand(0), m_Trunc(m_Value(W))) &&
      (Cmp.isEquality() || (!C1->isNegative() && !C2->isNegative()))) {
    // TODO: Is this a good transform for vectors? Wider types may reduce
    // throughput. Should this transform be limited (even for scalars) by using
    // shouldChangeType()?
    if (!Cmp.getType()->isVectorTy()) {
      Type *WideType = W->getType();
      unsigned WideScalarBits = WideType->getScalarSizeInBits();
      Constant *ZextC1 = ConstantInt::get(WideType, C1->zext(WideScalarBits));
      Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
      Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
      return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
    }
  }
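  // Worked example (illustrative; assumes the wider type is acceptable here):
  //   icmp ult (and (trunc i32 %w to i8), 15), 4
  // widens to
  //   icmp ult (and i32 %w, 15), 4
  // both constants are non-negative in i8, so zero-extending them preserves
  // the compared values.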
  if (Instruction *I = foldICmpAndShift(Cmp, And, C1, C2))
    return I;

  // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
  // (icmp pred (and A, (or (shl 1, B), 1), 0))
  //
  // iff pred isn't signed
  if (!Cmp.isSigned() && C1->isNullValue() &&
      match(And->getOperand(1), m_One())) {
    Constant *One = cast<Constant>(And->getOperand(1));
    Value *Or = And->getOperand(0);
    Value *A, *B, *LShr;
    if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
        match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
      unsigned UsesRemoved = 0;
      if (And->hasOneUse())
        ++UsesRemoved;
      if (Or->hasOneUse())
        ++UsesRemoved;
      if (LShr->hasOneUse())
        ++UsesRemoved;

      // Compute A & ((1 << B) | 1)
      Value *NewOr = nullptr;
      if (auto *C = dyn_cast<Constant>(B)) {
        if (UsesRemoved >= 1)
          NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
      } else {
        if (UsesRemoved >= 3)
          NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
                                                     /*HasNUW=*/true),
                                   One, Or->getName());
      }
      if (NewOr) {
        Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
        Cmp.setOperand(0, NewAnd);
        return &Cmp;
      }
    }
  }

  // (X & C2) > C1 --> (X & C2) != 0, if any bit set in (X & C2) will produce a
  // result greater than C1.
  unsigned NumTZ = C2->countTrailingZeros();
  if (Cmp.getPredicate() == ICmpInst::ICMP_UGT && NumTZ < C2->getBitWidth() &&
      APInt::getOneBitSet(C2->getBitWidth(), NumTZ).ugt(*C1)) {
    Constant *Zero = Constant::getNullValue(And->getType());
    return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
  }

  return nullptr;
}
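// Worked example (illustrative): with C2 = 8 and C1 = 3, the final fold above
// rewrites
//   icmp ugt (and i32 %x, 8), 3  -->  icmp ne (and i32 %x, 8), 0
// the only nonzero value (and %x, 8) can take is 8, which already exceeds 3.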
/// Fold icmp (and X, Y), C.
Instruction *InstCombiner::foldICmpAndConstant(ICmpInst &Cmp,
                                               BinaryOperator *And,
                                               const APInt *C) {
  if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
    return I;

  // TODO: These all require that Y is constant too, so refactor with the above.

  // Try to optimize things like "A[i] & 42 == 0" to index computations.
  Value *X = And->getOperand(0);
  Value *Y = And->getOperand(1);
  if (auto *LI = dyn_cast<LoadInst>(X))
    if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
      if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
        if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
            !LI->isVolatile() && isa<ConstantInt>(Y)) {
          ConstantInt *C2 = cast<ConstantInt>(Y);
          if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2))
            return Res;
        }

  if (!Cmp.isEquality())
    return nullptr;

  // X & -C == -C -> X >u ~C
  // X & -C != -C -> X <=u ~C
  //   iff C is a power of 2
  if (Cmp.getOperand(1) == Y && (-(*C)).isPowerOf2()) {
    auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT
                                                          : CmpInst::ICMP_ULE;
    return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
  }
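  // Worked example (illustrative, i8): with C = -4 (0b11111100),
  //   icmp eq (and i8 %x, -4), -4  -->  icmp ugt i8 %x, -5
  // the masked equality holds exactly for the four largest unsigned values
  // 252..255, i.e. %x u> 251.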
  // (X & C2) == 0 -> (trunc X) >= 0
  // (X & C2) != 0 -> (trunc X) < 0
  //   iff C2 is a power of 2 and it masks the sign bit of a legal integer type.
  const APInt *C2;
  if (And->hasOneUse() && C->isNullValue() && match(Y, m_APInt(C2))) {
    int32_t ExactLogBase2 = C2->exactLogBase2();
    if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) {
      Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
      if (And->getType()->isVectorTy())
        NTy = VectorType::get(NTy, And->getType()->getVectorNumElements());
      Value *Trunc = Builder.CreateTrunc(X, NTy);
      auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
                                                            : CmpInst::ICMP_SLT;
      return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy));
    }
  }

  return nullptr;
}
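// Worked example (illustrative): on a target where i16 is a legal type,
//   icmp ne (and i32 %x, 32768), 0  -->  icmp slt (trunc i32 %x to i16), 0
// bit 15 is the sign bit of i16, so the masked bit test becomes a sign test
// on the truncated value.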
/// Fold icmp (or X, Y), C.
Instruction *InstCombiner::foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
                                              const APInt *C) {
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  if (C->isOneValue()) {
    // icmp slt signum(V) 1 --> icmp slt V, 1
    Value *V = nullptr;
    if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
      return new ICmpInst(ICmpInst::ICMP_SLT, V,
                          ConstantInt::get(V->getType(), 1));
  }

  // X | C == C --> X <=u C
  // X | C != C --> X  >u C
  //   iff C+1 is a power of 2 (C is a bitmask of the low bits)
  if (Cmp.isEquality() && Cmp.getOperand(1) == Or->getOperand(1) &&
      (*C + 1).isPowerOf2()) {
    Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
    return new ICmpInst(Pred, Or->getOperand(0), Or->getOperand(1));
  }
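  // Worked example (illustrative): with C = 7 (C+1 == 8 is a power of 2),
  //   icmp eq (or i32 %x, 7), 7  -->  icmp ule i32 %x, 7
  // the OR leaves the value unchanged exactly when %x has no bits set above
  // the low-bit mask.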
  if (!Cmp.isEquality() || !C->isNullValue() || !Or->hasOneUse())
    return nullptr;

  Value *P, *Q;
  if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
    // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
    // -> and (icmp eq P, null), (icmp eq Q, null).
    Value *CmpP =
        Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
    Value *CmpQ =
        Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
    auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
    return BinaryOperator::Create(BOpc, CmpP, CmpQ);
  }

  // Are we using xors to bitwise check for a pair of (in)equalities? Convert to
  // a shorter form that has more potential to be folded even further.
  Value *X1, *X2, *X3, *X4;
  if (match(Or->getOperand(0), m_OneUse(m_Xor(m_Value(X1), m_Value(X2)))) &&
      match(Or->getOperand(1), m_OneUse(m_Xor(m_Value(X3), m_Value(X4))))) {
    // ((X1 ^ X2) || (X3 ^ X4)) == 0 --> (X1 == X2) && (X3 == X4)
    // ((X1 ^ X2) || (X3 ^ X4)) != 0 --> (X1 != X2) || (X3 != X4)
    Value *Cmp12 = Builder.CreateICmp(Pred, X1, X2);
    Value *Cmp34 = Builder.CreateICmp(Pred, X3, X4);
    auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
    return BinaryOperator::Create(BOpc, Cmp12, Cmp34);
  }

  return nullptr;
}

/// Fold icmp (mul X, Y), C.
Instruction *InstCombiner::foldICmpMulConstant(ICmpInst &Cmp,
                                               BinaryOperator *Mul,
                                               const APInt *C) {
  const APInt *MulC;
  if (!match(Mul->getOperand(1), m_APInt(MulC)))
    return nullptr;

  // If this is a test of the sign bit and the multiply is sign-preserving with
  // a constant operand, use the multiply LHS operand instead.
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  if (isSignTest(Pred, *C) && Mul->hasNoSignedWrap()) {
    if (MulC->isNegative())
      Pred = ICmpInst::getSwappedPredicate(Pred);
    return new ICmpInst(Pred, Mul->getOperand(0),
                        Constant::getNullValue(Mul->getType()));
  }

  return nullptr;
}

/// Fold icmp (shl 1, Y), C.
static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl,
                                   const APInt *C) {
  Value *Y;
  if (!match(Shl, m_Shl(m_One(), m_Value(Y))))
    return nullptr;

  Type *ShiftType = Shl->getType();
  uint32_t TypeBits = C->getBitWidth();
  bool CIsPowerOf2 = C->isPowerOf2();
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  if (Cmp.isUnsigned()) {
    // (1 << Y) pred C -> Y pred Log2(C)
    if (!CIsPowerOf2) {
      // (1 << Y) <  30 -> Y <= 4
      // (1 << Y) <= 30 -> Y <= 4
      // (1 << Y) >= 30 -> Y >  4
      // (1 << Y) >  30 -> Y >  4
      if (Pred == ICmpInst::ICMP_ULT)
        Pred = ICmpInst::ICMP_ULE;
      else if (Pred == ICmpInst::ICMP_UGE)
        Pred = ICmpInst::ICMP_UGT;
    }

    // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31
    // (1 << Y) <  2147483648 -> Y <  31 -> Y != 31
    unsigned CLog2 = C->logBase2();
    if (CLog2 == TypeBits - 1) {
      if (Pred == ICmpInst::ICMP_UGE)
        Pred = ICmpInst::ICMP_EQ;
      else if (Pred == ICmpInst::ICMP_ULT)
        Pred = ICmpInst::ICMP_NE;
    }
    return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
  } else if (Cmp.isSigned()) {
    Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
    if (C->isAllOnesValue()) {
      // (1 << Y) <= -1 -> Y == 31
      if (Pred == ICmpInst::ICMP_SLE)
        return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);

      // (1 << Y) >  -1 -> Y != 31
      if (Pred == ICmpInst::ICMP_SGT)
        return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
    } else if (!C->isNullValue()) {
      // (1 << Y) <  0 -> Y == 31
      // (1 << Y) <= 0 -> Y == 31
      if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
        return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);

      // (1 << Y) >= 0 -> Y != 31
      // (1 << Y) >  0 -> Y != 31
      if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
        return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
    }
  } else if (Cmp.isEquality() && CIsPowerOf2) {
    return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C->logBase2()));
  }

  return nullptr;
}

/// Fold icmp (shl X, Y), C.
Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp,
                                               BinaryOperator *Shl,
                                               const APInt *C) {
  const APInt *ShiftVal;
  if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
    return foldICmpShlConstConst(Cmp, Shl->getOperand(1), *C, *ShiftVal);

  const APInt *ShiftAmt;
  if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
    return foldICmpShlOne(Cmp, Shl, C);

  // Check that the shift amount is in range. If not, don't perform undefined
  // shifts. When the shift is visited, it will be simplified.
  unsigned TypeBits = C->getBitWidth();
  if (ShiftAmt->uge(TypeBits))
    return nullptr;

  ICmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Shl->getOperand(0);
  Type *ShType = Shl->getType();

  // NSW guarantees that we are only shifting out sign bits from the high bits,
  // so we can ASHR the compare constant without needing a mask and eliminate
  // the shift.
  if (Shl->hasNoSignedWrap()) {
    if (Pred == ICmpInst::ICMP_SGT) {
      // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
      APInt ShiftedC = C->ashr(*ShiftAmt);
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
    if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) {
      // This is the same code as the SGT case, but assert the pre-condition
      // that is needed for this to work with equality predicates.
      assert(C->ashr(*ShiftAmt).shl(*ShiftAmt) == *C &&
             "Compare known true or false was not folded");
      APInt ShiftedC = C->ashr(*ShiftAmt);
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
    if (Pred == ICmpInst::ICMP_SLT) {
      // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
      // (X << S) <=s C is equiv to X <=s (C >> S) for all C
      // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
      // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
      assert(!C->isMinSignedValue() && "Unexpected icmp slt");
      APInt ShiftedC = (*C - 1).ashr(*ShiftAmt) + 1;
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
    // If this is a signed comparison to 0 and the shift is sign preserving,
    // use the shift LHS operand instead; isSignTest may change 'Pred', so only
    // do that if we're sure to not continue on in this function.
    if (isSignTest(Pred, *C))
      return new ICmpInst(Pred, X, Constant::getNullValue(ShType));
  }

  // NUW guarantees that we are only shifting out zero bits from the high bits,
  // so we can LSHR the compare constant without needing a mask and eliminate
  // the shift.
  if (Shl->hasNoUnsignedWrap()) {
    if (Pred == ICmpInst::ICMP_UGT) {
      // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
      APInt ShiftedC = C->lshr(*ShiftAmt);
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
    if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) {
      // This is the same code as the UGT case, but assert the pre-condition
      // that is needed for this to work with equality predicates.
      assert(C->lshr(*ShiftAmt).shl(*ShiftAmt) == *C &&
             "Compare known true or false was not folded");
      APInt ShiftedC = C->lshr(*ShiftAmt);
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
    if (Pred == ICmpInst::ICMP_ULT) {
      // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
      // (X << S) <=u C is equiv to X <=u (C >> S) for all C
      // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
      // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
      assert(C->ugt(0) && "ult 0 should have been eliminated");
      APInt ShiftedC = (*C - 1).lshr(*ShiftAmt) + 1;
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
  }

  if (Cmp.isEquality() && Shl->hasOneUse()) {
    // Strength-reduce the shift into an 'and'.
    Constant *Mask = ConstantInt::get(
        ShType,
        APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
    Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
    Constant *LShrC = ConstantInt::get(ShType, C->lshr(*ShiftAmt));
    return new ICmpInst(Pred, And, LShrC);
  }
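  // Worked example (illustrative, i8): with a shift amount of 3,
  //   icmp eq (shl i8 %x, 3), 40  -->  icmp eq (and i8 %x, 31), 5
  // only the low 5 bits of %x survive the shift, so compare them against
  // 40 >> 3 == 5.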
  // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
  bool TrueIfSigned = false;
  if (Shl->hasOneUse() && isSignBitCheck(Pred, *C, TrueIfSigned)) {
    // (X << 31) <s 0  --> (X & 1) != 0
    Constant *Mask = ConstantInt::get(
        ShType,
        APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
    Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
    return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
                        And, Constant::getNullValue(ShType));
  }

  // Transform (icmp pred iM (shl iM %v, N), C)
  // -> (icmp pred i(M-N) (trunc %v iM to i(M-N)), (trunc (C>>N))
  // Transform the shl to a trunc if (trunc (C>>N)) has no loss and M-N is a
  // legal integer width.
  // This enables us to get rid of the shift in favor of a trunc that may be
  // free on the target. It has the additional benefit of comparing to a
  // smaller constant that may be more target-friendly.
  unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
  if (Shl->hasOneUse() && Amt != 0 && C->countTrailingZeros() >= Amt &&
      DL.isLegalInteger(TypeBits - Amt)) {
    Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
    if (ShType->isVectorTy())
      TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements());
    Constant *NewC =
        ConstantInt::get(TruncTy, C->ashr(*ShiftAmt).trunc(TypeBits - Amt));
    return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
  }

  return nullptr;
}

/// Fold icmp ({al}shr X, Y), C.
Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp,
                                               BinaryOperator *Shr,
                                               const APInt *C) {
  // An exact shr only shifts out zero bits, so:
  // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
  Value *X = Shr->getOperand(0);
  CmpInst::Predicate Pred = Cmp.getPredicate();
  if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() &&
      C->isNullValue())
    return new ICmpInst(Pred, X, Cmp.getOperand(1));

  const APInt *ShiftVal;
  if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal)))
    return foldICmpShrConstConst(Cmp, Shr->getOperand(1), *C, *ShiftVal);

  const APInt *ShiftAmt;
  if (!match(Shr->getOperand(1), m_APInt(ShiftAmt)))
    return nullptr;

  // Check that the shift amount is in range. If not, don't perform undefined
  // shifts. When the shift is visited it will be simplified.
  unsigned TypeBits = C->getBitWidth();
  unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits);
  if (ShAmtVal >= TypeBits || ShAmtVal == 0)
    return nullptr;

  bool IsAShr = Shr->getOpcode() == Instruction::AShr;
  if (!Cmp.isEquality()) {
    // If we have an unsigned comparison and an ashr, we can't simplify this.
    // Similarly for signed comparisons with lshr.
    if (Cmp.isSigned() != IsAShr)
      return nullptr;

    // Otherwise, all lshr and most exact ashr's are equivalent to a udiv/sdiv
    // by a power of 2. Since we already have logic to simplify these,
    // transform to div and then simplify the resultant comparison.
    if (IsAShr && (!Shr->isExact() || ShAmtVal == TypeBits - 1))
      return nullptr;

    // Revisit the shift (to delete it).
    Worklist.Add(Shr);

    Constant *DivCst = ConstantInt::get(
        Shr->getType(), APInt::getOneBitSet(TypeBits, ShAmtVal));

    Value *Tmp = IsAShr ? Builder.CreateSDiv(X, DivCst, "", Shr->isExact())
                        : Builder.CreateUDiv(X, DivCst, "", Shr->isExact());

    Cmp.setOperand(0, Tmp);

    // If the builder folded the binop, just return it.
    BinaryOperator *TheDiv = dyn_cast<BinaryOperator>(Tmp);
    if (!TheDiv)
      return &Cmp;

    // Otherwise, fold this div/compare.
    assert(TheDiv->getOpcode() == Instruction::SDiv ||
           TheDiv->getOpcode() == Instruction::UDiv);

    Instruction *Res = foldICmpDivConstant(Cmp, TheDiv, C);
    assert(Res && "This div/cst should have folded!");
    return Res;
  }

  // Handle equality comparisons of shift-by-constant.

  // If the comparison constant changes with the shift, the comparison cannot
  // succeed (bits of the comparison constant cannot match the shifted value).
  // This should be known by InstSimplify and already be folded to true/false.
  assert(((IsAShr && C->shl(ShAmtVal).ashr(ShAmtVal) == *C) ||
          (!IsAShr && C->shl(ShAmtVal).lshr(ShAmtVal) == *C)) &&
         "Expected icmp+shr simplify did not occur.");

  // Check if the bits shifted out are known to be zero. If so, we can compare
  // against the unshifted value:
  //  (X & 4) >> 1 == 2  --> (X & 4) == 4.
  Constant *ShiftedCmpRHS = ConstantInt::get(Shr->getType(), *C << ShAmtVal);
  if (Shr->hasOneUse()) {
    if (Shr->isExact())
      return new ICmpInst(Pred, X, ShiftedCmpRHS);

    // Otherwise strength reduce the shift into an 'and'.
    APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
    Constant *Mask = ConstantInt::get(Shr->getType(), Val);
    Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
    return new ICmpInst(Pred, And, ShiftedCmpRHS);
  }

  return nullptr;
}
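// Worked example (illustrative): for a non-exact lshr with one use,
//   icmp eq (lshr i32 %x, 2), 5  -->  icmp eq (and i32 %x, -4), 20
// the mask keeps only the bits that participated in the shifted comparison,
// and the constant is re-shifted left to match.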
/// Fold icmp (udiv X, Y), C.
Instruction *InstCombiner::foldICmpUDivConstant(ICmpInst &Cmp,
                                                BinaryOperator *UDiv,
                                                const APInt *C) {
  const APInt *C2;
  if (!match(UDiv->getOperand(0), m_APInt(C2)))
    return nullptr;

  assert(*C2 != 0 && "udiv 0, X should have been simplified already.");

  // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
  Value *Y = UDiv->getOperand(1);
  if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) {
    assert(!C->isMaxValue() &&
           "icmp ugt X, UINT_MAX should have been simplified already.");
    return new ICmpInst(ICmpInst::ICMP_ULE, Y,
                        ConstantInt::get(Y->getType(), C2->udiv(*C + 1)));
  }

  // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
  if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) {
    assert(*C != 0 && "icmp ult X, 0 should have been simplified already.");
    return new ICmpInst(ICmpInst::ICMP_UGT, Y,
                        ConstantInt::get(Y->getType(), C2->udiv(*C)));
  }

  return nullptr;
}
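// Worked example (illustrative): with C2 = 64,
//   icmp ugt (udiv i32 64, %y), 7  -->  icmp ule i32 %y, 8
// 64 /u %y > 7 holds exactly when %y <= 64 / (7 + 1).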
/// Fold icmp ({su}div X, Y), C.
Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
                                               BinaryOperator *Div,
                                               const APInt *C) {
  // Fold: icmp pred ([us]div X, C2), C -> range test
  // Fold this div into the comparison, producing a range check.
  // Determine, based on the divide type, what the range is being
  // checked. If there is an overflow on the low or high side, remember
  // it, otherwise compute the range [low, hi) bounding the new value.
  // See: InsertRangeTest above for the kinds of replacements possible.
  const APInt *C2;
  if (!match(Div->getOperand(1), m_APInt(C2)))
    return nullptr;

  // FIXME: If the operand types don't match the type of the divide
  // then don't attempt this transform. The code below doesn't have the
  // logic to deal with a signed divide and an unsigned compare (and
  // vice versa). This is because (x /s C2) <s C produces different
  // results than (x /s C2) <u C or (x /u C2) <s C or even
  // (x /u C2) <u C. Simply casting the operands and result won't
  // work. :( The if statement below tests that condition and bails
  // if it finds it.
  bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
  if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
    return nullptr;

  // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
  // INT_MIN will also fail if the divisor is 1. Although folds of all these
  // division-by-constant cases should be present, we can not assert that they
  // have happened before we reach this icmp instruction.
  if (C2->isNullValue() || C2->isOneValue() ||
      (DivIsSigned && C2->isAllOnesValue()))
    return nullptr;

  // TODO: We could do all of the computations below using APInt.
  Constant *CmpRHS = cast<Constant>(Cmp.getOperand(1));
  Constant *DivRHS = cast<Constant>(Div->getOperand(1));

  // Compute Prod = CmpRHS * DivRHS. We are essentially solving an equation of
  // form X / C2 = C. We solve for X by multiplying C2 (DivRHS) and C (CmpRHS).
  // By solving for X, we can turn this into a range check instead of computing
  // a divide.
  Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);

  // Determine if the product overflows by seeing if the product is not equal to
  // the divide. Make sure we do the same kind of divide as in the LHS
  // instruction that we're folding.
  bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS)
                             : ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;

  ICmpInst::Predicate Pred = Cmp.getPredicate();

  // If the division is known to be exact, then there is no remainder from the
  // divide, so the covered range size is unit, otherwise it is the divisor.
  Constant *RangeSize =
      Div->isExact() ? ConstantInt::get(Div->getType(), 1) : DivRHS;

  // Figure out the interval that is being checked. For example, a comparison
  // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
  // Compute this interval based on the constants involved and the signedness of
  // the compare/divide. This computes a half-open interval, keeping track of
  // whether either value in the interval overflows. After analysis each
  // overflow variable is set to 0 if its corresponding bound variable is valid,
  // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
  int LoOverflow = 0, HiOverflow = 0;
  Constant *LoBound = nullptr, *HiBound = nullptr;

  if (!DivIsSigned) { // udiv
    // e.g. X/5 op 3  --> [15, 20)
    LoBound = Prod;
    HiOverflow = LoOverflow = ProdOV;
    if (!HiOverflow) {
      // If this is not an exact divide, then many values in the range collapse
      // to the same result value.
      HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
    }
  } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
    if (C->isNullValue()) { // (X / pos) op 0
      // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
      LoBound = ConstantExpr::getNeg(SubOne(RangeSize));
      HiBound = RangeSize;
    } else if (C->isStrictlyPositive()) { // (X / pos) op pos
      LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
      HiOverflow = LoOverflow = ProdOV;
      if (!HiOverflow)
        HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
    } else { // (X / pos) op neg
      // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
      HiBound = AddOne(Prod);
      LoOverflow = HiOverflow = ProdOV ? -1 : 0;
      if (!LoOverflow) {
        Constant *DivNeg = ConstantExpr::getNeg(RangeSize);
        LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
      }
    }
  } else if (C2->isNegative()) { // Divisor is < 0.
    if (Div->isExact())
      RangeSize = ConstantExpr::getNeg(RangeSize);
    if (C->isNullValue()) { // (X / neg) op 0
      // e.g. X/-5 op 0  --> [-4, 5)
      LoBound = AddOne(RangeSize);
      HiBound = ConstantExpr::getNeg(RangeSize);
      if (HiBound == DivRHS) { // -INTMIN = INTMIN
        HiOverflow = 1;        // [INTMIN+1, overflow)
        HiBound = nullptr;     // e.g. X/INTMIN = 0 --> X > INTMIN
      }
    } else if (C->isStrictlyPositive()) { // (X / neg) op pos
      // e.g. X/-5 op 3  --> [-19, -14)
      HiBound = AddOne(Prod);
      HiOverflow = LoOverflow = ProdOV ? -1 : 0;
      if (!LoOverflow)
        LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0;
    } else {          // (X / neg) op neg
      LoBound = Prod; // e.g. X/-5 op -3  --> [15, 20)
      LoOverflow = HiOverflow = ProdOV;
      if (!HiOverflow)
        HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
    }

    // Dividing by a negative swaps the condition. LT <-> GT
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  Value *X = Div->getOperand(0);
  switch (Pred) {
  default: llvm_unreachable("Unhandled icmp opcode!");
  case ICmpInst::ICMP_EQ:
    if (LoOverflow && HiOverflow)
      return replaceInstUsesWith(Cmp, Builder.getFalse());
    if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X, LoBound);
    if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X, HiBound);
    return replaceInstUsesWith(
        Cmp, insertRangeTest(X, LoBound->getUniqueInteger(),
                             HiBound->getUniqueInteger(), DivIsSigned, true));
  case ICmpInst::ICMP_NE:
    if (LoOverflow && HiOverflow)
      return replaceInstUsesWith(Cmp, Builder.getTrue());
    if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X, LoBound);
    if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X, HiBound);
    return replaceInstUsesWith(Cmp,
                               insertRangeTest(X, LoBound->getUniqueInteger(),
                                               HiBound->getUniqueInteger(),
                                               DivIsSigned, false));
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_SLT:
    if (LoOverflow == +1) // Low bound is greater than input range.
      return replaceInstUsesWith(Cmp, Builder.getTrue());
    if (LoOverflow == -1) // Low bound is less than input range.
      return replaceInstUsesWith(Cmp, Builder.getFalse());
    return new ICmpInst(Pred, X, LoBound);
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_SGT:
    if (HiOverflow == +1) // High bound greater than input range.
      return replaceInstUsesWith(Cmp, Builder.getFalse());
    if (HiOverflow == -1) // High bound less than input range.
      return replaceInstUsesWith(Cmp, Builder.getTrue());
    if (Pred == ICmpInst::ICMP_UGT)
      return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
    return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
  }
}
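// Worked example (illustrative): "icmp eq (udiv i8 %x, 5), 3" tests the
// interval [15, 20), so with no overflow on either bound it becomes the
// single range check that insertRangeTest emits, conceptually
//   icmp ult (add i8 %x, -15), 5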
/// Fold icmp (sub X, Y), C.
Instruction *InstCombiner::foldICmpSubConstant(ICmpInst &Cmp,
                                               BinaryOperator *Sub,
                                               const APInt *C) {
  Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
  ICmpInst::Predicate Pred = Cmp.getPredicate();

  // The following transforms are only worth it if the only user of the subtract
  // is the icmp.
  if (!Sub->hasOneUse())
    return nullptr;

  if (Sub->hasNoSignedWrap()) {
    // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
    if (Pred == ICmpInst::ICMP_SGT && C->isAllOnesValue())
      return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);

    // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
    if (Pred == ICmpInst::ICMP_SGT && C->isNullValue())
      return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);

    // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
    if (Pred == ICmpInst::ICMP_SLT && C->isNullValue())
      return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);

    // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
    if (Pred == ICmpInst::ICMP_SLT && C->isOneValue())
      return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
  }

  const APInt *C2;
  if (!match(X, m_APInt(C2)))
    return nullptr;

  // C2 - Y <u C -> (Y | (C - 1)) == C2
  //   iff (C2 & (C - 1)) == C - 1 and C is a power of 2
  if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() &&
      (*C2 & (*C - 1)) == (*C - 1))
    return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, *C - 1), X);

  // C2 - Y >u C -> (Y | C) != C2
  //   iff C2 & C == C and C + 1 is a power of 2
  if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == *C)
    return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, *C), X);

  return nullptr;
}
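// Worked example (illustrative, i8): with C2 = 103 (0b01100111) and C = 3,
//   icmp ugt (sub i8 103, %y), 3  -->  icmp ne (or i8 %y, 3), 103
// here C2 & C == C and C+1 == 4 is a power of 2, so the subtract is traded
// for a cheaper or+icmp.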
/// Fold icmp (add X, Y), C.
Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp,
                                               BinaryOperator *Add,
                                               const APInt *C) {
  Value *Y = Add->getOperand(1);
  const APInt *C2;
  if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
    return nullptr;

  // Fold icmp pred (add X, C2), C.
  Value *X = Add->getOperand(0);
  Type *Ty = Add->getType();
  CmpInst::Predicate Pred = Cmp.getPredicate();

  // If the add does not wrap, we can always adjust the compare by subtracting
  // the constants. Equality comparisons are handled elsewhere. SGE/SLE are
  // canonicalized to SGT/SLT.
  if (Add->hasNoSignedWrap() &&
      (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) {
    bool Overflow;
    APInt NewC = C->ssub_ov(*C2, Overflow);
    // If there is overflow, the result must be true or false.
    // TODO: Can we assert there is no overflow because InstSimplify always
    // handles those cases?
    if (!Overflow)
      // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
      return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
  }

  auto CR = ConstantRange::makeExactICmpRegion(Pred, *C).subtract(*C2);
  const APInt &Upper = CR.getUpper();
  const APInt &Lower = CR.getLower();
  if (Cmp.isSigned()) {
    if (Lower.isSignMask())
      return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
    if (Upper.isSignMask())
      return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
  } else {
    if (Lower.isMinValue())
      return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
    if (Upper.isMinValue())
      return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
  }

  if (!Add->hasOneUse())
    return nullptr;

  // X+C <u C2 -> (X & -C2) == C
  //   iff C & (C2-1) == 0
  //       C2 is a power of 2
  if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() && (*C2 & (*C - 1)) == 0)
    return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -(*C)),
                        ConstantExpr::getNeg(cast<Constant>(Y)));

  // X+C >u C2 -> (X & ~C2) != C
  //   iff C & C2 == 0
  //       C2+1 is a power of 2
  if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == 0)
    return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~(*C)),
                        ConstantExpr::getNeg(cast<Constant>(Y)));

  return nullptr;
}
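// Worked example (illustrative, i8): with the add constant 16 and the compare
// constant 16 (a power of 2),
//   icmp ult (add i8 %x, 16), 16  -->  icmp eq (and i8 %x, -16), -16
// the add shifts the window [-16, -1] onto [0, 16), and membership in
// [-16, -1] is exactly a test of the top four bits.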
bool InstCombiner::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS,
                                           Value *&RHS, ConstantInt *&Less,
                                           ConstantInt *&Equal,
                                           ConstantInt *&Greater) {
  // TODO: Generalize this to work with other comparison idioms or ensure
  // they get canonicalized into this form.

  // select i1 (a == b), i32 Equal, i32 (select i1 (a < b), i32 Less, i32
  // Greater), where Equal, Less and Greater are placeholders for any three
  // constants.
  ICmpInst::Predicate PredA, PredB;
  if (match(SI->getTrueValue(), m_ConstantInt(Equal)) &&
      match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) &&
      PredA == ICmpInst::ICMP_EQ &&
      match(SI->getFalseValue(),
            m_Select(m_ICmp(PredB, m_Specific(LHS), m_Specific(RHS)),
                     m_ConstantInt(Less), m_ConstantInt(Greater))) &&
      PredB == ICmpInst::ICMP_SLT) {
    return true;
  }
  return false;
}

Instruction *InstCombiner::foldICmpSelectConstant(ICmpInst &Cmp,
                                                  Instruction *Select,
                                                  ConstantInt *C) {
  assert(C && "Cmp RHS should be a constant int!");
  // If we're testing a constant value against the result of a three way
  // comparison, the result can be expressed directly in terms of the
  // original values being compared. Note: We could possibly be more
  // aggressive here and remove the hasOneUse test. The original select is
  // really likely to simplify or sink when we remove a test of the result.
  Value *OrigLHS, *OrigRHS;
  ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
  if (Cmp.hasOneUse() &&
      matchThreeWayIntCompare(cast<SelectInst>(Select), OrigLHS, OrigRHS,
                              C1LessThan, C2Equal, C3GreaterThan)) {
    assert(C1LessThan && C2Equal && C3GreaterThan);

    bool TrueWhenLessThan =
        ConstantExpr::getCompare(Cmp.getPredicate(), C1LessThan, C)
            ->isAllOnesValue();
    bool TrueWhenEqual =
        ConstantExpr::getCompare(Cmp.getPredicate(), C2Equal, C)
            ->isAllOnesValue();
    bool TrueWhenGreaterThan =
        ConstantExpr::getCompare(Cmp.getPredicate(), C3GreaterThan, C)
            ->isAllOnesValue();

    // This generates the new instruction that will replace the original Cmp
    // Instruction. Instead of enumerating the various combinations when
    // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus
    // false, we rely on chaining of ORs and future passes of InstCombine to
    // simplify the OR further (i.e. a s< b || a == b becomes a s<= b).
    //
    // When none of the three constants satisfy the predicate for the RHS (C),
    // the entire original Cmp can be simplified to a false.
    Value *Cond = Builder.getFalse();
    if (TrueWhenLessThan)
      Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT,
                                                       OrigLHS, OrigRHS));
    if (TrueWhenEqual)
      Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ,
                                                       OrigLHS, OrigRHS));
    if (TrueWhenGreaterThan)
      Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT,
                                                       OrigLHS, OrigRHS));

    return replaceInstUsesWith(Cmp, Cond);
  }
  return nullptr;
}
/// Try to fold integer comparisons with a constant operand: icmp Pred X, C
/// where X is some kind of instruction.
Instruction *InstCombiner::foldICmpInstWithConstant(ICmpInst &Cmp) {
  const APInt *C;
  if (!match(Cmp.getOperand(1), m_APInt(C)))
    return nullptr;

  BinaryOperator *BO;
  if (match(Cmp.getOperand(0), m_BinOp(BO))) {
    switch (BO->getOpcode()) {
    case Instruction::Xor:
      if (Instruction *I = foldICmpXorConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::And:
      if (Instruction *I = foldICmpAndConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::Or:
      if (Instruction *I = foldICmpOrConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::Mul:
      if (Instruction *I = foldICmpMulConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::Shl:
      if (Instruction *I = foldICmpShlConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::LShr:
    case Instruction::AShr:
      if (Instruction *I = foldICmpShrConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::UDiv:
      if (Instruction *I = foldICmpUDivConstant(Cmp, BO, C))
        return I;
      LLVM_FALLTHROUGH;
    case Instruction::SDiv:
      if (Instruction *I = foldICmpDivConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::Sub:
      if (Instruction *I = foldICmpSubConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::Add:
      if (Instruction *I = foldICmpAddConstant(Cmp, BO, C))
        return I;
      break;
    default:
      break;
    }
    // TODO: These folds could be refactored to be part of the above calls.
    if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, C))
      return I;
  }

  // Match against CmpInst LHS being instructions other than binary operators.
  Instruction *LHSI;
  if (match(Cmp.getOperand(0), m_Instruction(LHSI))) {
    switch (LHSI->getOpcode()) {
    case Instruction::Select: {
      // For now, we only support constant integers while folding the
      // ICMP(SELECT) pattern. We can extend this to support vector of integers
      // similar to the cases handled by binary ops above.
      if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
        if (Instruction *I = foldICmpSelectConstant(Cmp, LHSI, ConstRHS))
          return I;
      break;
    }
    case Instruction::Trunc:
      if (Instruction *I = foldICmpTruncConstant(Cmp, LHSI, C))
        return I;
      break;
    default:
      break;
    }
  }

  if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, C))
    return I;

  return nullptr;
}
/// Fold an icmp equality instruction with binary operator LHS and constant RHS:
/// icmp eq/ne BO, C.
Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
                                                             BinaryOperator *BO,
                                                             const APInt *C) {
  // TODO: Some of these folds could work with arbitrary constants, but this
  // function is limited to scalar and vector splat constants.
  if (!Cmp.isEquality())
    return nullptr;

  ICmpInst::Predicate Pred = Cmp.getPredicate();
  bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
  Constant *RHS = cast<Constant>(Cmp.getOperand(1));
  Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);

  switch (BO->getOpcode()) {
  case Instruction::SRem:
    // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
    if (C->isNullValue() && BO->hasOneUse()) {
      const APInt *BOC;
      if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
        Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
        return new ICmpInst(Pred, NewRem,
                            Constant::getNullValue(BO->getType()));
      }
    }
    break;
  case Instruction::Add: {
    // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
    const APInt *BOC;
    if (match(BOp1, m_APInt(BOC))) {
      if (BO->hasOneUse()) {
        Constant *SubC = ConstantExpr::getSub(RHS, cast<Constant>(BOp1));
        return new ICmpInst(Pred, BOp0, SubC);
      }
    } else if (C->isNullValue()) {
      // Replace ((add A, B) != 0) with (A != -B) if A or B is
      // efficiently invertible, or if the add has just this one use.
      if (Value *NegVal = dyn_castNegVal(BOp1))
        return new ICmpInst(Pred, BOp0, NegVal);
      if (Value *NegVal = dyn_castNegVal(BOp0))
        return new ICmpInst(Pred, NegVal, BOp1);
      if (BO->hasOneUse()) {
        Value *Neg = Builder.CreateNeg(BOp1);
        Neg->takeName(BO);
        return new ICmpInst(Pred, BOp0, Neg);
      }
    }
    break;
  }
  case Instruction::Xor:
    if (BO->hasOneUse()) {
      if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
        // For the xor case, we can xor two constants together, eliminating
        // the explicit xor.
        return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
      } else if (C->isNullValue()) {
        // Replace ((xor A, B) != 0) with (A != B)
        return new ICmpInst(Pred, BOp0, BOp1);
      }
    }
    break;
  case Instruction::Sub:
    if (BO->hasOneUse()) {
      const APInt *BOC;
      if (match(BOp0, m_APInt(BOC))) {
        // Replace ((sub BOC, B) != C) with (B != BOC-C).
        Constant *SubC = ConstantExpr::getSub(cast<Constant>(BOp0), RHS);
        return new ICmpInst(Pred, BOp1, SubC);
      } else if (C->isNullValue()) {
        // Replace ((sub A, B) != 0) with (A != B).
        return new ICmpInst(Pred, BOp0, BOp1);
      }
    }
    break;
  case Instruction::Or: {
    const APInt *BOC;
    if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
      // Comparing if all bits outside of a constant mask are set?
      // Replace (X | C) == -1 with (X & ~C) == ~C.
      // This removes the -1 constant.
      Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
      Value *And = Builder.CreateAnd(BOp0, NotBOC);
      return new ICmpInst(Pred, And, NotBOC);
    }
    break;
  }
  case Instruction::And: {
    const APInt *BOC;
    if (match(BOp1, m_APInt(BOC))) {
      // If we have ((X & C) == C), turn it into ((X & C) != 0).
      if (C == BOC && C->isPowerOf2())
        return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
                            BO, Constant::getNullValue(RHS->getType()));

      // Don't perform the following transforms if the AND has multiple uses
      if (!BO->hasOneUse())
        break;

      // Replace (and X, (1 << size(X)-1) != 0) with x s< 0
      if (BOC->isSignMask()) {
        Constant *Zero = Constant::getNullValue(BOp0->getType());
        auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
        return new ICmpInst(NewPred, BOp0, Zero);
      }

      // ((X & ~7) == 0) --> X < 8
      if (C->isNullValue() && (~(*BOC) + 1).isPowerOf2()) {
        Constant *NegBOC = ConstantExpr::getNeg(cast<Constant>(BOp1));
        auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
        return new ICmpInst(NewPred, BOp0, NegBOC);
      }
    }
    break;
  }
  case Instruction::Mul:
    if (C->isNullValue() && BO->hasNoSignedWrap()) {
      const APInt *BOC;
      if (match(BOp1, m_APInt(BOC)) && !BOC->isNullValue()) {
        // The trivial case (mul X, 0) is handled by InstSimplify.
        // General case : (mul X, C) != 0 iff X != 0
        //                (mul X, C) == 0 iff X == 0
        return new ICmpInst(Pred, BOp0, Constant::getNullValue(RHS->getType()));
      }
    }
    break;
  case Instruction::UDiv:
    if (C->isNullValue()) {
      // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
      auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
      return new ICmpInst(NewPred, BOp1, BOp0);
    }
    break;
  default:
    break;
  }
  return nullptr;
}
/// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
Instruction *InstCombiner::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
                                                         const APInt *C) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0));
  if (!II || !Cmp.isEquality())
    return nullptr;

  // Handle icmp {eq|ne} <intrinsic>, Constant.
  Type *Ty = II->getType();
  switch (II->getIntrinsicID()) {
  case Intrinsic::bswap:
    Worklist.Add(II);
    Cmp.setOperand(0, II->getArgOperand(0));
    Cmp.setOperand(1, ConstantInt::get(Ty, C->byteSwap()));
    return &Cmp;
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
    // ctz(A) == bitwidth(A)  ->  A == 0 and likewise for !=
    if (*C == C->getBitWidth()) {
      Worklist.Add(II);
      Cmp.setOperand(0, II->getArgOperand(0));
      Cmp.setOperand(1, ConstantInt::getNullValue(Ty));
      return &Cmp;
    }
    break;
  case Intrinsic::ctpop: {
    // popcount(A) == 0  ->  A == 0 and likewise for !=
    // popcount(A) == bitwidth(A)  ->  A == -1 and likewise for !=
    bool IsZero = C->isNullValue();
    if (IsZero || *C == C->getBitWidth()) {
      Worklist.Add(II);
      Cmp.setOperand(0, II->getArgOperand(0));
      auto *NewOp =
          IsZero ? Constant::getNullValue(Ty) : Constant::getAllOnesValue(Ty);
      Cmp.setOperand(1, NewOp);
      return &Cmp;
    }
    break;
  }
  default:
    break;
  }

  return nullptr;
}
/// Handle icmp with constant (but not simple integer constant) RHS.
Instruction *InstCombiner::foldICmpInstWithConstantNotInt(ICmpInst &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Constant *RHSC = dyn_cast<Constant>(Op1);
  Instruction *LHSI = dyn_cast<Instruction>(Op0);
  if (!RHSC || !LHSI)
    return nullptr;

  switch (LHSI->getOpcode()) {
  case Instruction::GetElementPtr:
    // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
    if (RHSC->isNullValue() &&
        cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
      return new ICmpInst(
          I.getPredicate(), LHSI->getOperand(0),
          Constant::getNullValue(LHSI->getOperand(0)->getType()));
    break;
  case Instruction::PHI:
    // Only fold icmp into the PHI if the phi and icmp are in the same
    // block. If in the same block, we're encouraging jump threading. If
    // not, we are just pessimizing the code by making an i1 phi.
    if (LHSI->getParent() == I.getParent())
      if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
        return NV;
    break;
  case Instruction::Select: {
    // If either operand of the select is a constant, we can fold the
    // comparison into the select arms, which will cause one to be
    // constant folded and the select turned into a bitwise or.
    Value *Op1 = nullptr, *Op2 = nullptr;
    ConstantInt *CI = nullptr;
    if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
      Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
      CI = dyn_cast<ConstantInt>(Op1);
    }
    if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
      Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
      CI = dyn_cast<ConstantInt>(Op2);
    }

    // We only want to perform this transformation if it will not lead to
    // additional code. This is true if either both sides of the select
    // fold to a constant (in which case the icmp is replaced with a select
    // which will usually simplify) or this is the only user of the
    // select (in which case we are trading a select+icmp for a simpler
    // select+icmp) or all uses of the select can be replaced based on
    // dominance information ("Global cases").
    bool Transform = false;
    if (Op1 && Op2)
      Transform = true;
    else if (Op1 || Op2) {
      // Local case
      if (LHSI->hasOneUse())
        Transform = true;
      // Global cases
      else if (CI && !CI->isZero())
        // When Op1 is constant try replacing select with second operand.
        // Otherwise Op2 is constant and try replacing select with first
        // operand.
        Transform =
            replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1);
    }
    if (Transform) {
      if (!Op1)
        Op1 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC,
                                 I.getName());
      if (!Op2)
        Op2 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC,
                                 I.getName());
      return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
    }
    break;
  }
  case Instruction::IntToPtr:
    // icmp pred inttoptr(X), null -> icmp pred X, 0
    if (RHSC->isNullValue() &&
        DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
      return new ICmpInst(
          I.getPredicate(), LHSI->getOperand(0),
          Constant::getNullValue(LHSI->getOperand(0)->getType()));
    break;

  case Instruction::Load:
    // Try to optimize things like "A[i] > 4" to index computations.
    if (GetElementPtrInst *GEP =
            dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
        if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
            !cast<LoadInst>(LHSI)->isVolatile())
          if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
            return Res;
    }
    break;
  }

  return nullptr;
}
2894 /// Try to fold icmp (binop), X or icmp X, (binop).
2895 /// TODO: A large part of this logic is duplicated in InstSimplify's
2896 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
2898 Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) {
2899 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2901 // Special logic for binary operators.
2902 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
2903 BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
2907 const CmpInst::Predicate Pred = I.getPredicate();
2908 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
2909 if (BO0 && isa<OverflowingBinaryOperator>(BO0))
2911 ICmpInst::isEquality(Pred) ||
2912 (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
2913 (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
2914 if (BO1 && isa<OverflowingBinaryOperator>(BO1))
2916 ICmpInst::isEquality(Pred) ||
2917 (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
2918 (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
2920 // Analyze the case when either Op0 or Op1 is an add instruction.
2921 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
2922 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2923 if (BO0 && BO0->getOpcode() == Instruction::Add) {
2924 A = BO0->getOperand(0);
2925 B = BO0->getOperand(1);
2927 if (BO1 && BO1->getOpcode() == Instruction::Add) {
2928 C = BO1->getOperand(0);
2929 D = BO1->getOperand(1);
2932 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
2933 if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
2934 return new ICmpInst(Pred, A == Op1 ? B : A,
2935 Constant::getNullValue(Op1->getType()));
2937 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
2938 if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
2939 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
2942 // icmp (X+Y), (X+Z) -> icmp Y, Z for equalities or if there is no overflow.
2943 if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
2945 // Try not to increase register pressure.
2946 BO0->hasOneUse() && BO1->hasOneUse()) {
2947 // Determine Y and Z in the form icmp (X+Y), (X+Z).
2950 // C + B == C + D -> B == D
2953 } else if (A == D) {
2954 // D + B == C + D -> B == C
2957 } else if (B == C) {
2958 // A + C == C + D -> A == D
2963 // A + D == C + D -> A == C
2967 return new ICmpInst(Pred, Y, Z);
2970 // icmp slt (X + -1), Y -> icmp sle X, Y
2971 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT &&
2972 match(B, m_AllOnes()))
2973 return new ICmpInst(CmpInst::ICMP_SLE, A, Op1);
2975 // icmp sge (X + -1), Y -> icmp sgt X, Y
2976 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE &&
2977 match(B, m_AllOnes()))
2978 return new ICmpInst(CmpInst::ICMP_SGT, A, Op1);
2980 // icmp sle (X + 1), Y -> icmp slt X, Y
2981 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One()))
2982 return new ICmpInst(CmpInst::ICMP_SLT, A, Op1);
2984 // icmp sgt (X + 1), Y -> icmp sge X, Y
2985 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One()))
2986 return new ICmpInst(CmpInst::ICMP_SGE, A, Op1);
2988 // icmp sgt X, (Y + -1) -> icmp sge X, Y
2989 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT &&
2990 match(D, m_AllOnes()))
2991 return new ICmpInst(CmpInst::ICMP_SGE, Op0, C);
2993 // icmp sle X, (Y + -1) -> icmp slt X, Y
2994 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE &&
2995 match(D, m_AllOnes()))
2996 return new ICmpInst(CmpInst::ICMP_SLT, Op0, C);
2998 // icmp sge X, (Y + 1) -> icmp sgt X, Y
2999 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One()))
3000 return new ICmpInst(CmpInst::ICMP_SGT, Op0, C);
3002 // icmp slt X, (Y + 1) -> icmp sle X, Y
3003 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One()))
3004 return new ICmpInst(CmpInst::ICMP_SLE, Op0, C);
3006 // TODO: The subtraction-related identities shown below also hold, but
3007 // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
3008 // wouldn't happen even if they were implemented.
3010 // icmp ult (X - 1), Y -> icmp ule X, Y
3011 // icmp uge (X - 1), Y -> icmp ugt X, Y
3012 // icmp ugt X, (Y - 1) -> icmp uge X, Y
3013 // icmp ule X, (Y - 1) -> icmp ult X, Y
3015 // icmp ule (X + 1), Y -> icmp ult X, Y
3016 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One()))
3017 return new ICmpInst(CmpInst::ICMP_ULT, A, Op1);
3019 // icmp ugt (X + 1), Y -> icmp uge X, Y
3020 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One()))
3021 return new ICmpInst(CmpInst::ICMP_UGE, A, Op1);
3023 // icmp uge X, (Y + 1) -> icmp ugt X, Y
3024 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One()))
3025 return new ICmpInst(CmpInst::ICMP_UGT, Op0, C);
3027 // icmp ult X, (Y + 1) -> icmp ule X, Y
3028 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One()))
3029 return new ICmpInst(CmpInst::ICMP_ULE, Op0, C);
3031 // if C1 has greater magnitude than C2:
3032 // icmp (X + C1), (Y + C2) -> icmp (X + C3), Y
3033 // s.t. C3 = C1 - C2
3035 // if C2 has greater magnitude than C1:
3036 // icmp (X + C1), (Y + C2) -> icmp X, (Y + C3)
3037 // s.t. C3 = C2 - C1
3038 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
3039 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned())
3040 if (ConstantInt *C1 = dyn_cast<ConstantInt>(B))
3041 if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) {
3042 const APInt &AP1 = C1->getValue();
3043 const APInt &AP2 = C2->getValue();
3044 if (AP1.isNegative() == AP2.isNegative()) {
3045 APInt AP1Abs = C1->getValue().abs();
3046 APInt AP2Abs = C2->getValue().abs();
3047 if (AP1Abs.uge(AP2Abs)) {
3048 ConstantInt *C3 = Builder.getInt(AP1 - AP2);
3049 Value *NewAdd = Builder.CreateNSWAdd(A, C3);
3050 return new ICmpInst(Pred, NewAdd, C);
3052 ConstantInt *C3 = Builder.getInt(AP2 - AP1);
3053 Value *NewAdd = Builder.CreateNSWAdd(C, C3);
3054 return new ICmpInst(Pred, A, NewAdd);
3059 // Analyze the case when either Op0 or Op1 is a sub instruction.
3060 // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
3065 if (BO0 && BO0->getOpcode() == Instruction::Sub) {
3066 A = BO0->getOperand(0);
3067 B = BO0->getOperand(1);
3069 if (BO1 && BO1->getOpcode() == Instruction::Sub) {
3070 C = BO1->getOperand(0);
3071 D = BO1->getOperand(1);
3074 // icmp (X-Y), X -> icmp 0, Y for equalities or if there is no overflow.
3075 if (A == Op1 && NoOp0WrapProblem)
3076 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
3078 // icmp X, (X-Y) -> icmp Y, 0 for equalities or if there is no overflow.
3079 if (C == Op0 && NoOp1WrapProblem)
3080 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
3082 // icmp (Y-X), (Z-X) -> icmp Y, Z for equalities or if there is no overflow.
3083 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem &&
3084 // Try not to increase register pressure.
3085 BO0->hasOneUse() && BO1->hasOneUse())
3086 return new ICmpInst(Pred, A, C);
3088 // icmp (X-Y), (X-Z) -> icmp Z, Y for equalities or if there is no overflow.
3089 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem &&
3090 // Try not to increase register pressure.
3091 BO0->hasOneUse() && BO1->hasOneUse())
3092 return new ICmpInst(Pred, D, B);
3094 // icmp (0-X) < cst --> x > -cst
3095 if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
3097 if (match(BO0, m_Neg(m_Value(X))))
3098 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1))
3099 if (!RHSC->isMinValue(/*isSigned=*/true))
3100 return new ICmpInst(I.getSwappedPredicate(), X,
3101 ConstantExpr::getNeg(RHSC));
3104 BinaryOperator *SRem = nullptr;
3105 // icmp (srem X, Y), Y
3106 if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
3108 // icmp Y, (srem X, Y)
3109 else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
3110 Op0 == BO1->getOperand(1))
3113 // We don't check hasOneUse to avoid increasing register pressure because
3114 // the value we use is the same value this instruction was already using.
3115 switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
3118 case ICmpInst::ICMP_EQ:
3119 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
3120 case ICmpInst::ICMP_NE:
3121 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
3122 case ICmpInst::ICMP_SGT:
3123 case ICmpInst::ICMP_SGE:
3124 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
3125 Constant::getAllOnesValue(SRem->getType()));
3126 case ICmpInst::ICMP_SLT:
3127 case ICmpInst::ICMP_SLE:
3128 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
3129 Constant::getNullValue(SRem->getType()));
3133 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() &&
3134 BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) {
3135 switch (BO0->getOpcode()) {
3136 default:
3137 break;
3138 case Instruction::Add:
3139 case Instruction::Sub:
3140 case Instruction::Xor: {
3141 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
3142 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3144 const APInt *C;
3145 if (match(BO0->getOperand(1), m_APInt(C))) {
3146 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
3147 if (C->isSignMask()) {
3148 ICmpInst::Predicate NewPred =
3149 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate();
3150 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
3151 }
3153 // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
3154 if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) {
3155 ICmpInst::Predicate NewPred =
3156 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate();
3157 NewPred = I.getSwappedPredicate(NewPred);
3158 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
3159 }
3160 }
3161 break;
3162 }
3163 case Instruction::Mul: {
3164 if (!I.isEquality())
3165 break;
3167 const APInt *C;
3168 if (match(BO0->getOperand(1), m_APInt(C)) && !C->isNullValue() &&
3169 !C->isOneValue()) {
3170 // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask)
3171 // Mask = -1 >> count-trailing-zeros(C).
3172 if (unsigned TZs = C->countTrailingZeros()) {
3173 Constant *Mask = ConstantInt::get(
3174 BO0->getType(),
3175 APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
3176 Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
3177 Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
3178 return new ICmpInst(Pred, And1, And2);
3179 }
3180 // If there are no trailing zeros in the multiplier, just eliminate
3181 // the multiplies (no masking is needed):
3182 // icmp eq/ne (X * C), (Y * C) --> icmp eq/ne X, Y
3183 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3184 }
3185 break;
3186 }
3187 case Instruction::UDiv:
3188 case Instruction::LShr:
3189 if (I.isSigned() || !BO0->isExact() || !BO1->isExact())
3190 break;
3191 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3193 case Instruction::SDiv:
3194 if (!I.isEquality() || !BO0->isExact() || !BO1->isExact())
3195 break;
3196 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3198 case Instruction::AShr:
3199 if (!BO0->isExact() || !BO1->isExact())
3200 break;
3201 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3203 case Instruction::Shl: {
3204 bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap();
3205 bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap();
3206 if (!NUW && !NSW)
3207 break;
3208 if (!NSW && I.isSigned())
3209 break;
3210 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3211 }
3212 }
3213 }
3216 // Transform A & (L - 1) `ult` L --> L != 0
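// For example (illustrative IR; %a and %len are placeholder names):
//   %m = add i32 %len, -1
//   %and = and i32 %a, %m
//   %c = icmp ult i32 %and, %len
// becomes
//   %c = icmp ne i32 %len, 0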
3217 auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
3218 auto BitwiseAnd = m_c_And(m_Value(), LSubOne);
3220 if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) {
3221 auto *Zero = Constant::getNullValue(BO0->getType());
3222 return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
3223 }
3225 return nullptr;
3226 }
3229 /// Fold icmp Pred min|max(X, Y), X.
3230 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) {
3231 ICmpInst::Predicate Pred = Cmp.getPredicate();
3232 Value *Op0 = Cmp.getOperand(0);
3233 Value *X = Cmp.getOperand(1);
3235 // Canonicalize minimum or maximum operand to LHS of the icmp.
3236 if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) ||
3237 match(X, m_c_SMax(m_Specific(Op0), m_Value())) ||
3238 match(X, m_c_UMin(m_Specific(Op0), m_Value())) ||
3239 match(X, m_c_UMax(m_Specific(Op0), m_Value()))) {
3240 std::swap(Op0, X);
3241 Pred = Cmp.getSwappedPredicate();
3242 }
3244 Value *Y;
3245 if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) {
3246 // smin(X, Y) == X --> X s<= Y
3247 // smin(X, Y) s>= X --> X s<= Y
3248 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE)
3249 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
3251 // smin(X, Y) != X --> X s> Y
3252 // smin(X, Y) s< X --> X s> Y
3253 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT)
3254 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
3256 // These cases should be handled in InstSimplify:
3257 // smin(X, Y) s<= X --> true
3258 // smin(X, Y) s> X --> false
3259 return nullptr;
3260 }
3262 if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) {
3263 // smax(X, Y) == X --> X s>= Y
3264 // smax(X, Y) s<= X --> X s>= Y
3265 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE)
3266 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
3268 // smax(X, Y) != X --> X s< Y
3269 // smax(X, Y) s> X --> X s< Y
3270 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT)
3271 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
3273 // These cases should be handled in InstSimplify:
3274 // smax(X, Y) s>= X --> true
3275 // smax(X, Y) s< X --> false
3276 return nullptr;
3277 }
3279 if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) {
3280 // umin(X, Y) == X --> X u<= Y
3281 // umin(X, Y) u>= X --> X u<= Y
3282 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE)
3283 return new ICmpInst(ICmpInst::ICMP_ULE, X, Y);
3285 // umin(X, Y) != X --> X u> Y
3286 // umin(X, Y) u< X --> X u> Y
3287 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT)
3288 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
3290 // These cases should be handled in InstSimplify:
3291 // umin(X, Y) u<= X --> true
3292 // umin(X, Y) u> X --> false
3293 return nullptr;
3294 }
3296 if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) {
3297 // umax(X, Y) == X --> X u>= Y
3298 // umax(X, Y) u<= X --> X u>= Y
3299 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE)
3300 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);
3302 // umax(X, Y) != X --> X u< Y
3303 // umax(X, Y) u> X --> X u< Y
3304 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT)
3305 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
3307 // These cases should be handled in InstSimplify:
3308 // umax(X, Y) u>= X --> true
3309 // umax(X, Y) u< X --> false
3310 return nullptr;
3311 }
3313 return nullptr;
3314 }
3316 Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
3317 if (!I.isEquality())
3318 return nullptr;
3320 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3321 const CmpInst::Predicate Pred = I.getPredicate();
3322 Value *A, *B, *C, *D;
3323 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
3324 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
3325 Value *OtherVal = A == Op1 ? B : A;
3326 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
3327 }
3329 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
3330 // A^c1 == C^c2 --> A == C^(c1^c2)
3331 ConstantInt *C1, *C2;
3332 if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
3333 Op1->hasOneUse()) {
3334 Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
3335 Value *Xor = Builder.CreateXor(C, NC);
3336 return new ICmpInst(Pred, A, Xor);
3337 }
3339 // A^B == A^D -> B == D
3340 if (A == C)
3341 return new ICmpInst(Pred, B, D);
3342 if (A == D)
3343 return new ICmpInst(Pred, B, C);
3344 if (B == C)
3345 return new ICmpInst(Pred, A, D);
3346 if (B == D)
3347 return new ICmpInst(Pred, A, C);
3348 }
3349 }
3351 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
3352 // A == (A^B) -> B == 0
3353 Value *OtherVal = A == Op0 ? B : A;
3354 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
3355 }
3357 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
3358 if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
3359 match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
3360 Value *X = nullptr, *Y = nullptr, *Z = nullptr;
3362 if (A == C) {
3363 X = B;
3364 Y = D;
3365 Z = A;
3366 } else if (A == D) {
3367 X = B;
3368 Y = C;
3369 Z = A;
3370 } else if (B == C) {
3371 X = A;
3372 Y = D;
3373 Z = B;
3374 } else if (B == D) {
3375 X = A;
3376 Y = C;
3377 Z = B;
3378 }
3380 if (X) { // Build (X^Y) & Z
3381 Op1 = Builder.CreateXor(X, Y);
3382 Op1 = Builder.CreateAnd(Op1, Z);
3383 I.setOperand(0, Op1);
3384 I.setOperand(1, Constant::getNullValue(Op1->getType()));
3385 return &I;
3386 }
3387 }
3389 // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B)
3390 // and (B & (1<<X)-1) == (zext A) --> A == (trunc B)
3391 ConstantInt *Cst1;
3392 if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) &&
3393 match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) ||
3394 (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) &&
3395 match(Op1, m_ZExt(m_Value(A))))) {
3396 APInt Pow2 = Cst1->getValue() + 1;
3397 if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
3398 Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
3399 return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
3400 }
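// For example, the transform above rewrites (illustrative IR, with A of
// type i8 and B of type i32, so the mask 255 selects exactly i8's bits):
//   %z = zext i8 %a to i32
//   %m = and i32 %b, 255
//   %c = icmp eq i32 %z, %m
// into
//   %t = trunc i32 %b to i8
//   %c = icmp eq i8 %a, %t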
3402 // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
3403 // For lshr and ashr pairs.
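// For example (illustrative IR; the shift amount 4 is arbitrary):
//   %sa = lshr i32 %a, 4
//   %sb = lshr i32 %b, 4
//   %c = icmp eq i32 %sa, %sb
// only compares bits above the low 4, so it becomes:
//   %x = xor i32 %a, %b
//   %c = icmp ult i32 %x, 16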
3404 if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) &&
3405 match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) ||
3406 (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) &&
3407 match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) {
3408 unsigned TypeBits = Cst1->getBitWidth();
3409 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
3410 if (ShAmt < TypeBits && ShAmt != 0) {
3411 ICmpInst::Predicate NewPred =
3412 Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
3413 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
3414 APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
3415 return new ICmpInst(NewPred, Xor, Builder.getInt(CmpVal));
3416 }
3417 }
3419 // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
3420 if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
3421 match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
3422 unsigned TypeBits = Cst1->getBitWidth();
3423 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
3424 if (ShAmt < TypeBits && ShAmt != 0) {
3425 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
3426 APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
3427 Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal),
3428 I.getName() + ".mask");
3429 return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
3430 }
3431 }
3433 // Transform "icmp eq (trunc (lshr(X, cst1)), cst" to
3434 // "icmp (and X, mask), cst"
3435 unsigned ShAmt = 0;
3436 if (Op0->hasOneUse() &&
3437 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
3438 match(Op1, m_ConstantInt(Cst1)) &&
3439 // Only do this when A has multiple uses. This is most important to do
3440 // when it exposes other optimizations.
3441 !A->hasOneUse()) {
3442 unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
3444 if (ShAmt < ASize) {
3445 APInt MaskV =
3446 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
3447 MaskV <<= ShAmt;
3449 APInt CmpV = Cst1->getValue().zext(ASize);
3450 CmpV <<= ShAmt;
3452 Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
3453 return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
3454 }
3455 }
3457 // If both operands are byte-swapped or bit-reversed, just compare the
3458 // underlying values.
3459 // TODO: Move this to a function similar to foldICmpIntrinsicWithConstant()
3460 // and handle more intrinsics.
3461 if ((match(Op0, m_BSwap(m_Value(A))) && match(Op1, m_BSwap(m_Value(B)))) ||
3462 (match(Op0, m_BitReverse(m_Value(A))) &&
3463 match(Op1, m_BitReverse(m_Value(B)))))
3464 return new ICmpInst(Pred, A, B);
3466 return nullptr;
3467 }
3469 /// Handle icmp (cast x to y), (cast/cst). We only handle extending casts so
3470 /// far.
3471 Instruction *InstCombiner::foldICmpWithCastAndCast(ICmpInst &ICmp) {
3472 const CastInst *LHSCI = cast<CastInst>(ICmp.getOperand(0));
3473 Value *LHSCIOp = LHSCI->getOperand(0);
3474 Type *SrcTy = LHSCIOp->getType();
3475 Type *DestTy = LHSCI->getType();
3476 Value *RHSCIOp;
3478 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
3479 // integer type is the same size as the pointer type.
3480 if (LHSCI->getOpcode() == Instruction::PtrToInt &&
3481 DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
3482 Value *RHSOp = nullptr;
3483 if (auto *RHSC = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
3484 Value *RHSCIOp = RHSC->getOperand(0);
3485 if (RHSCIOp->getType()->getPointerAddressSpace() ==
3486 LHSCIOp->getType()->getPointerAddressSpace()) {
3487 RHSOp = RHSC->getOperand(0);
3488 // If the pointer types don't match, insert a bitcast.
3489 if (LHSCIOp->getType() != RHSOp->getType())
3490 RHSOp = Builder.CreateBitCast(RHSOp, LHSCIOp->getType());
3491 }
3492 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
3493 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
3494 }
3496 if (RHSOp)
3497 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSOp);
3498 }
3500 // The code below only handles extension cast instructions, so far.
3501 // Enforce this.
3502 if (LHSCI->getOpcode() != Instruction::ZExt &&
3503 LHSCI->getOpcode() != Instruction::SExt)
3504 return nullptr;
3506 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
3507 bool isSignedCmp = ICmp.isSigned();
3509 if (auto *CI = dyn_cast<CastInst>(ICmp.getOperand(1))) {
3510 // Not an extension from the same type?
3511 RHSCIOp = CI->getOperand(0);
3512 if (RHSCIOp->getType() != LHSCIOp->getType())
3513 return nullptr;
3515 // If the signedness of the two casts doesn't agree (i.e. one is a sext
3516 // and the other is a zext), then we can't handle this.
3517 if (CI->getOpcode() != LHSCI->getOpcode())
3518 return nullptr;
3520 // Deal with equality cases early.
3521 if (ICmp.isEquality())
3522 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp);
3524 // A signed comparison of sign extended values simplifies into a
3525 // signed comparison.
3526 if (isSignedCmp && isSignedExt)
3527 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp);
3529 // The other three cases all fold into an unsigned comparison.
3530 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
3531 }
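// For example, the fold above rewrites (illustrative IR):
//   %xa = sext i8 %a to i32
//   %xb = sext i8 %b to i32
//   %c = icmp slt i32 %xa, %xb
// into the equivalent narrow compare:
//   %c = icmp slt i8 %a, %b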
3533 // If we aren't dealing with a constant on the RHS, exit early.
3534 auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
3535 if (!C)
3536 return nullptr;
3538 // Compute the constant that would happen if we truncated to SrcTy then
3539 // re-extended to DestTy.
3540 Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy);
3541 Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(), Res1, DestTy);
3543 // If the re-extended constant didn't change...
3544 if (Res2 == C) {
3545 // Deal with equality cases early.
3546 if (ICmp.isEquality())
3547 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1);
3549 // A signed comparison of sign extended values simplifies into a
3550 // signed comparison.
3551 if (isSignedExt && isSignedCmp)
3552 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1);
3554 // The other three cases all fold into an unsigned comparison.
3555 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, Res1);
3556 }
3558 // The re-extended constant changed, partly changed (in the case of a vector),
3559 // or could not be determined to be equal (in the case of a constant
3560 // expression), so the constant cannot be represented in the shorter type.
3561 // Consequently, we cannot emit a simple comparison.
3562 // All the cases that fold to true or false will have already been handled
3563 // by SimplifyICmpInst, so only deal with the tricky case.
3565 if (isSignedCmp || !isSignedExt || !isa<ConstantInt>(C))
3566 return nullptr;
3568 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases
3569 // should have been folded away previously and not enter in here.
3571 // We're performing an unsigned comp with a sign extended value.
3572 // This is true if the input is >= 0. [aka >s -1]
3573 Constant *NegOne = Constant::getAllOnesValue(SrcTy);
3574 Value *Result = Builder.CreateICmpSGT(LHSCIOp, NegOne, ICmp.getName());
3576 // Finally, return the value computed.
3577 if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
3578 return replaceInstUsesWith(ICmp, Result);
3580 assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
3581 return BinaryOperator::CreateNot(Result);
3582 }
3584 bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
3585 Value *RHS, Instruction &OrigI,
3586 Value *&Result, Constant *&Overflow) {
3587 if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
3588 std::swap(LHS, RHS);
3590 auto SetResult = [&](Value *OpResult, Constant *OverflowVal, bool ReuseName) {
3591 Result = OpResult;
3592 Overflow = OverflowVal;
3593 if (ReuseName)
3594 Result->takeName(&OrigI);
3595 return true;
3596 };
3598 // If the overflow check was an add followed by a compare, the insertion point
3599 // may be pointing to the compare. We want to insert the new instructions
3600 // before the add in case there are uses of the add between the add and the
3601 // compare.
3602 Builder.SetInsertPoint(&OrigI);
3604 switch (OCF) {
3605 case OCF_INVALID:
3606 llvm_unreachable("bad overflow check kind!");
3608 case OCF_UNSIGNED_ADD: {
3609 OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigI);
3610 if (OR == OverflowResult::NeverOverflows)
3611 return SetResult(Builder.CreateNUWAdd(LHS, RHS), Builder.getFalse(),
3612 true);
3614 if (OR == OverflowResult::AlwaysOverflows)
3615 return SetResult(Builder.CreateAdd(LHS, RHS), Builder.getTrue(), true);
3617 // Fall through uadd into sadd
3618 LLVM_FALLTHROUGH;
3619 }
3620 case OCF_SIGNED_ADD: {
3621 // X + 0 -> {X, false}
3622 if (match(RHS, m_Zero()))
3623 return SetResult(LHS, Builder.getFalse(), false);
3625 // We can strength reduce this signed add into a regular add if we can prove
3626 // that it will never overflow.
3627 if (OCF == OCF_SIGNED_ADD)
3628 if (willNotOverflowSignedAdd(LHS, RHS, OrigI))
3629 return SetResult(Builder.CreateNSWAdd(LHS, RHS), Builder.getFalse(),
3630 true);
3631 break;
3632 }
3634 case OCF_UNSIGNED_SUB:
3635 case OCF_SIGNED_SUB: {
3636 // X - 0 -> {X, false}
3637 if (match(RHS, m_Zero()))
3638 return SetResult(LHS, Builder.getFalse(), false);
3640 if (OCF == OCF_SIGNED_SUB) {
3641 if (willNotOverflowSignedSub(LHS, RHS, OrigI))
3642 return SetResult(Builder.CreateNSWSub(LHS, RHS), Builder.getFalse(),
3643 true);
3644 } else {
3645 if (willNotOverflowUnsignedSub(LHS, RHS, OrigI))
3646 return SetResult(Builder.CreateNUWSub(LHS, RHS), Builder.getFalse(),
3647 true);
3648 }
3649 break;
3650 }
3652 case OCF_UNSIGNED_MUL: {
3653 OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, &OrigI);
3654 if (OR == OverflowResult::NeverOverflows)
3655 return SetResult(Builder.CreateNUWMul(LHS, RHS), Builder.getFalse(),
3656 true);
3657 if (OR == OverflowResult::AlwaysOverflows)
3658 return SetResult(Builder.CreateMul(LHS, RHS), Builder.getTrue(), true);
3659 break;
3660 }
3661 case OCF_SIGNED_MUL:
3662 // X * undef -> undef
3663 if (isa<UndefValue>(RHS))
3664 return SetResult(RHS, UndefValue::get(Builder.getInt1Ty()), false);
3666 // X * 0 -> {0, false}
3667 if (match(RHS, m_Zero()))
3668 return SetResult(RHS, Builder.getFalse(), false);
3670 // X * 1 -> {X, false}
3671 if (match(RHS, m_One()))
3672 return SetResult(LHS, Builder.getFalse(), false);
3674 if (OCF == OCF_SIGNED_MUL)
3675 if (willNotOverflowSignedMul(LHS, RHS, OrigI))
3676 return SetResult(Builder.CreateNSWMul(LHS, RHS), Builder.getFalse(),
3677 true);
3678 break;
3679 }
3681 return false;
3682 }
3684 /// \brief Recognize and process idiom involving test for multiplication
3685 /// overflow.
3686 ///
3687 /// The caller has matched a pattern of the form:
3688 ///   I = cmp u (mul(zext A, zext B)), V
3689 /// The function checks if this is a test for overflow and if so replaces
3690 /// multiplication with call to 'mul.with.overflow' intrinsic.
3691 ///
3692 /// \param I Compare instruction.
3693 /// \param MulVal Result of 'mult' instruction. It is one of the arguments of
3694 /// the compare instruction. Must be of integer type.
3695 /// \param OtherVal The other argument of compare instruction.
3696 /// \returns Instruction which must replace the compare instruction, NULL if no
3697 /// replacement required.
3698 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
3699 Value *OtherVal, InstCombiner &IC) {
3700 // Don't bother doing this transformation for pointers, don't do it for
3701 // vectors.
3702 if (!isa<IntegerType>(MulVal->getType()))
3703 return nullptr;
3705 assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
3706 assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
3707 auto *MulInstr = dyn_cast<Instruction>(MulVal);
3708 if (!MulInstr)
3709 return nullptr;
3710 assert(MulInstr->getOpcode() == Instruction::Mul);
3712 auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
3713 *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
3714 assert(LHS->getOpcode() == Instruction::ZExt);
3715 assert(RHS->getOpcode() == Instruction::ZExt);
3716 Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
3718 // Calculate type and width of the result produced by mul.with.overflow.
3719 Type *TyA = A->getType(), *TyB = B->getType();
3720 unsigned WidthA = TyA->getPrimitiveSizeInBits(),
3721 WidthB = TyB->getPrimitiveSizeInBits();
3722 unsigned MulWidth;
3723 Type *MulType;
3724 if (WidthB > WidthA) {
3725 MulWidth = WidthB;
3726 MulType = TyB;
3727 } else {
3728 MulWidth = WidthA;
3729 MulType = TyA;
3730 }
3732 // In order to replace the original mul with a narrower mul.with.overflow,
3733 // all uses must ignore upper bits of the product. The number of used low
3734 // bits must not be greater than the width of mul.with.overflow.
3735 if (MulVal->hasNUsesOrMore(2))
3736 for (User *U : MulVal->users()) {
3737 if (U == &I)
3738 continue;
3739 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
3740 // Check if truncation ignores bits above MulWidth.
3741 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
3742 if (TruncWidth > MulWidth)
3743 return nullptr;
3744 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
3745 // Check if AND ignores bits above MulWidth.
3746 if (BO->getOpcode() != Instruction::And)
3747 return nullptr;
3748 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
3749 const APInt &CVal = CI->getValue();
3750 if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
3751 return nullptr;
3752 } else {
3753 // In this case we could have the operand of the binary operation
3754 // being defined in another block, and performing the replacement
3755 // could break the dominance relation.
3756 return nullptr;
3757 }
3758 } else {
3759 // Other uses prohibit this transformation.
3760 return nullptr;
3761 }
3762 }
3764 // Recognize patterns
3765 switch (I.getPredicate()) {
3766 case ICmpInst::ICMP_EQ:
3767 case ICmpInst::ICMP_NE:
3768 // Recognize pattern:
3769 // mulval = mul(zext A, zext B)
3770 // cmp eq/neq mulval, zext trunc mulval
3771 if (ZExtInst *Zext = dyn_cast<ZExtInst>(OtherVal))
3772 if (Zext->hasOneUse()) {
3773 Value *ZextArg = Zext->getOperand(0);
3774 if (TruncInst *Trunc = dyn_cast<TruncInst>(ZextArg))
3775 if (Trunc->getType()->getPrimitiveSizeInBits() == MulWidth)
3776 break; // Recognized
3777 }
3779 // Recognize pattern:
3780 // mulval = mul(zext A, zext B)
3781 // cmp eq/neq mulval, and(mulval, mask), mask selects low MulWidth bits.
3782 ConstantInt *CI;
3783 Value *ValToMask;
3784 if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
3785 if (ValToMask != MulVal)
3786 return nullptr;
3787 const APInt &CVal = CI->getValue() + 1;
3788 if (CVal.isPowerOf2()) {
3789 unsigned MaskWidth = CVal.logBase2();
3790 if (MaskWidth == MulWidth)
3791 break; // Recognized
3792 }
3793 }
3794 return nullptr;
3796 case ICmpInst::ICMP_UGT:
3797 // Recognize pattern:
3798 // mulval = mul(zext A, zext B)
3799 // cmp ugt mulval, max
3800 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3801 APInt MaxVal = APInt::getMaxValue(MulWidth);
3802 MaxVal = MaxVal.zext(CI->getBitWidth());
3803 if (MaxVal.eq(CI->getValue()))
3804 break; // Recognized
3805 }
3806 return nullptr;
3808 case ICmpInst::ICMP_UGE:
3809 // Recognize pattern:
3810 // mulval = mul(zext A, zext B)
3811 // cmp uge mulval, max+1
3812 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3813 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
3814 if (MaxVal.eq(CI->getValue()))
3815 break; // Recognized
3816 }
3817 return nullptr;
3819 case ICmpInst::ICMP_ULE:
3820 // Recognize pattern:
3821 // mulval = mul(zext A, zext B)
3822 // cmp ule mulval, max
3823 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3824 APInt MaxVal = APInt::getMaxValue(MulWidth);
3825 MaxVal = MaxVal.zext(CI->getBitWidth());
3826 if (MaxVal.eq(CI->getValue()))
3827 break; // Recognized
3828 }
3829 return nullptr;
3831 case ICmpInst::ICMP_ULT:
3832 // Recognize pattern:
3833 // mulval = mul(zext A, zext B)
3834 // cmp ult mulval, max + 1
3835 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3836 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
3837 if (MaxVal.eq(CI->getValue()))
3838 break; // Recognized
3839 }
3840 return nullptr;
3842 default:
3843 return nullptr;
3844 }
3846 InstCombiner::BuilderTy &Builder = IC.Builder;
3847 Builder.SetInsertPoint(MulInstr);
3849 // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
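// For example (illustrative IR, with i32 operands; 4294967295 is UINT32_MAX):
//   %za = zext i32 %a to i64
//   %zb = zext i32 %b to i64
//   %m = mul i64 %za, %zb
//   %c = icmp ugt i64 %m, 4294967295
// becomes
//   %u = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
//   %c = extractvalue { i32, i1 } %u, 1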
3850 Value *MulA = A, *MulB = B;
3851 if (WidthA < MulWidth)
3852 MulA = Builder.CreateZExt(A, MulType);
3853 if (WidthB < MulWidth)
3854 MulB = Builder.CreateZExt(B, MulType);
3855 Value *F = Intrinsic::getDeclaration(I.getModule(),
3856 Intrinsic::umul_with_overflow, MulType);
3857 CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul");
3858 IC.Worklist.Add(MulInstr);
3860 // If there are uses of mul result other than the comparison, we know that
3861 // they are truncation or binary AND. Change them to use the result of
3862 // mul.with.overflow and adjust the mask/size accordingly.
3863 if (MulVal->hasNUsesOrMore(2)) {
3864 Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
3865 for (User *U : MulVal->users()) {
3866 if (U == &I || U == OtherVal)
3867 continue;
3868 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
3869 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
3870 IC.replaceInstUsesWith(*TI, Mul);
3871 else
3872 TI->setOperand(0, Mul);
3873 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
3874 assert(BO->getOpcode() == Instruction::And);
3875 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
3876 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
3877 APInt ShortMask = CI->getValue().trunc(MulWidth);
3878 Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
3879 Instruction *Zext =
3880 cast<Instruction>(Builder.CreateZExt(ShortAnd, BO->getType()));
3881 IC.Worklist.Add(Zext);
3882 IC.replaceInstUsesWith(*BO, Zext);
3883 } else {
3884 llvm_unreachable("Unexpected Binary operation");
3885 }
3886 IC.Worklist.Add(cast<Instruction>(U));
3887 }
3888 }
3889 if (isa<Instruction>(OtherVal))
3890 IC.Worklist.Add(cast<Instruction>(OtherVal));
3892 // The original icmp gets replaced with the overflow value, maybe inverted
3893 // depending on predicate.
3894 bool Inverse = false;
3895 switch (I.getPredicate()) {
3896 case ICmpInst::ICMP_NE:
3897 break;
3898 case ICmpInst::ICMP_EQ:
3899 Inverse = true;
3900 break;
3901 case ICmpInst::ICMP_UGT:
3902 case ICmpInst::ICMP_UGE:
3903 if (I.getOperand(0) == MulVal)
3904 break;
3905 Inverse = true;
3906 break;
3907 case ICmpInst::ICMP_ULT:
3908 case ICmpInst::ICMP_ULE:
3909 if (I.getOperand(1) == MulVal)
3910 break;
3911 Inverse = true;
3912 break;
3913 default:
3914 llvm_unreachable("Unexpected predicate");
3915 }
3916 if (Inverse) {
3917 Value *Res = Builder.CreateExtractValue(Call, 1);
3918 return BinaryOperator::CreateNot(Res);
3919 }
3921 return ExtractValueInst::Create(Call, 1);
3922 }
3924 /// When performing a comparison against a constant, it is possible that not all
3925 /// the bits in the LHS are demanded. This helper method computes the mask that
3926 /// contains all the bits that are needed to make the comparison.
3927 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth,
3928 bool isSignCheck) {
3929 if (isSignCheck)
3930 return APInt::getSignMask(BitWidth);
3932 ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1));
3933 if (!CI) return APInt::getAllOnesValue(BitWidth);
3934 const APInt &RHS = CI->getValue();
3936 switch (I.getPredicate()) {
3937 // For a UGT comparison, we don't care about any bits that
3938 // correspond to the trailing ones of the comparand. The value of these
3939 // bits doesn't impact the outcome of the comparison, because any value
3940 // greater than the RHS must differ in a bit higher than these due to carry.
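// Worked example (illustrative): for icmp ugt %x, 7 the comparand has three
// trailing ones, so bits 0..2 of %x are not demanded; any %x that exceeds 7
// must already differ from it in bit 3 or above.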
3941 case ICmpInst::ICMP_UGT: {
3942 unsigned trailingOnes = RHS.countTrailingOnes();
3943 return APInt::getBitsSetFrom(BitWidth, trailingOnes);
3944 }
3946 // Similarly, for a ULT comparison, we don't care about the trailing zeros.
3947 // Any value less than the RHS must differ in a higher bit because of carries.
3948 case ICmpInst::ICMP_ULT: {
3949 unsigned trailingZeros = RHS.countTrailingZeros();
3950 return APInt::getBitsSetFrom(BitWidth, trailingZeros);
3951 }
3953 default:
3954 return APInt::getAllOnesValue(BitWidth);
3955 }
3956 }
3958 /// \brief Check if the order of \p Op0 and \p Op1 as operands in an ICmpInst
3959 /// should be swapped.
3960 /// The decision is based on how many times these two operands are reused
3961 /// as subtract operands and their positions in those instructions.
3962 /// The rationale is that several architectures use the same instruction for
3963 /// both subtract and cmp, thus it is better if the order of those operands
3964 /// match.
3965 /// \return true if Op0 and Op1 should be swapped.
3966 static bool swapMayExposeCSEOpportunities(const Value * Op0,
3967 const Value * Op1) {
3968 // Filter out pointer values as those cannot appear directly in subtract.
3969 // FIXME: we may want to go through inttoptrs or bitcasts.
3970 if (Op0->getType()->isPointerTy())
3971 return false;
3972 // Count every use of both Op0 and Op1 in a subtract.
3973 // Each time Op0 is the first operand, count -1: swapping is bad, the
3974 // subtract already has the same layout as the compare.
3975 // Each time Op0 is the second operand, count +1: swapping is good, the
3976 // subtract has a different layout than the compare.
3977 // At the end, if the benefit is greater than 0, Op0 should come second to
3978 // expose more CSE opportunities.
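// For example (illustrative): if the function already contains
//   %d = sub i32 %b, %a
// then emitting the compare as icmp(%b, %a) rather than icmp(%a, %b) lets
// targets with a combined sub-and-compare instruction CSE the subtraction.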
3979 int GlobalSwapBenefits = 0;
3980 for (const User *U : Op0->users()) {
3981 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(U);
3982 if (!BinOp || BinOp->getOpcode() != Instruction::Sub)
3983 continue;
3984 // If Op0 is the first argument, it is not beneficial to swap the
3985 // arguments.
3986 int LocalSwapBenefits = -1;
3987 unsigned Op1Idx = 1;
3988 if (BinOp->getOperand(Op1Idx) == Op0) {
3989 Op1Idx = 0;
3990 LocalSwapBenefits = 1;
3991 }
3992 if (BinOp->getOperand(Op1Idx) != Op1)
3993 continue;
3994 GlobalSwapBenefits += LocalSwapBenefits;
3995 }
3996 return GlobalSwapBenefits > 0;
3997 }
3999 /// \brief Check that one use is in the same block as the definition and all
4000 /// other uses are in blocks dominated by a given block.
4002 /// \param DI Definition
4003 /// \param UI Use
4004 /// \param DB Block that must dominate all uses of \p DI outside
4005 /// the parent block
4006 /// \return true when \p UI is the only use of \p DI in the parent block
4007 /// and all other uses of \p DI are in blocks dominated by \p DB.
4009 bool InstCombiner::dominatesAllUses(const Instruction *DI,
4010 const Instruction *UI,
4011 const BasicBlock *DB) const {
4012 assert(DI && UI && "Instruction not defined\n");
4013 // Ignore incomplete definitions.
4014 if (!DI->getParent())
4015 return false;
4016 // DI and UI must be in the same block.
4017 if (DI->getParent() != UI->getParent())
4018 return false;
4019 // Protect from self-referencing blocks.
4020 if (DI->getParent() == DB)
4021 return false;
4022 for (const User *U : DI->users()) {
4023 auto *Usr = cast<Instruction>(U);
4024 if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
4025 return false;
4026 }
4027 return true;
4028 }
4030 /// Return true when the instruction sequence within a block is select-cmp-br.
4031 static bool isChainSelectCmpBranch(const SelectInst *SI) {
4032 const BasicBlock *BB = SI->getParent();
4033 if (!BB)
4034 return false;
4035 auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
4036 if (!BI || BI->getNumSuccessors() != 2)
4037 return false;
4038 auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
4039 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
4040 return false;
4041 return true;
4042 }
4044 /// \brief True when a select result is replaced by one of its operands
4045 /// in select-icmp sequence. This will eventually result in the elimination
4046 /// of the select.
4048 /// \param SI Select instruction
4049 /// \param Icmp Compare instruction
4050 /// \param SIOpd Operand that replaces the select
4052 /// Notes:
4053 /// - The replacement is global and requires dominator information
4054 /// - The caller is responsible for the actual replacement
4056 /// Example:
4058 /// entry:
4059 /// %4 = select i1 %3, %C* %0, %C* null
4060 /// %5 = icmp eq %C* %4, null
4061 /// br i1 %5, label %9, label %7
4063 /// ; <label>:7 ; preds = %entry
4064 /// %8 = getelementptr inbounds %C* %4, i64 0, i32 0
4067 /// can be transformed to
4069 /// %5 = icmp eq %C* %0, null
4070 /// %6 = select i1 %3, i1 %5, i1 true
4071 /// br i1 %6, label %9, label %7
4073 /// ; <label>:7 ; preds = %entry
4074 /// %8 = getelementptr inbounds %C* %0, i64 0, i32 0 // replace by %0!
4076 /// Similar when the first operand of the select is a constant or/and
4077 /// the compare is for not equal rather than equal.
4079 /// NOTE: The function is only called when the select and compare constants
4080 /// are equal, so the optimization can work only for EQ predicates. This is not a
4081 /// major restriction since a NE compare should be 'normalized' to an equal
4082 /// compare, which usually happens in the combiner and test case
4083 /// select-cmp-br.ll checks for it.
4084 bool InstCombiner::replacedSelectWithOperand(SelectInst *SI,
4085 const ICmpInst *Icmp,
4086 const unsigned SIOpd) {
4087 assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
4088 if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
4089 BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
4090 // The check for the single predecessor is not the best that can be
4091 // done. But it protects efficiently against cases like when SI's
4092 // home block has two successors, Succ and Succ1, and Succ1 is a
4093 // predecessor of Succ. Then SI can't be replaced by SIOpd because the use
4094 // that gets replaced can be reached on either path. So the uniqueness check
4095 // guarantees that the path all uses of SI (outside SI's parent) are on
4096 // is disjoint from all other paths out of SI. But that information
4097 // is more expensive to compute, and the trade-off here is in favor
4098 // of compile-time. It should also be noticed that we check for a single
4099 // predecessor and not just uniqueness. This handles the situation where
4100 // Succ and Succ1 point to the same basic block.
4101 if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
4102 NumSel++;
4103 SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
4104 return true;
4105 }
4106 }
4107 return false;
4108 }
4110 /// Try to fold the comparison based on range information we can get by checking
4111 /// whether bits are known to be zero or one in the inputs.
4112 Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
4113 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4114 Type *Ty = Op0->getType();
4115 ICmpInst::Predicate Pred = I.getPredicate();
4117 // Get scalar or pointer size.
4118 unsigned BitWidth = Ty->isIntOrIntVectorTy()
4119 ? Ty->getScalarSizeInBits()
4120 : DL.getTypeSizeInBits(Ty->getScalarType());
4122 if (!BitWidth)
4123 return nullptr;
4125 // If this is a normal comparison, it demands all bits. If it is a sign bit
4126 // comparison, it only demands the sign bit.
4127 bool IsSignBit = false;
4128 const APInt *CmpC;
4129 if (match(Op1, m_APInt(CmpC))) {
4130 bool UnusedBit;
4131 IsSignBit = isSignBitCheck(Pred, *CmpC, UnusedBit);
4132 }
4134 KnownBits Op0Known(BitWidth);
4135 KnownBits Op1Known(BitWidth);
4137 if (SimplifyDemandedBits(&I, 0,
4138 getDemandedBitsLHSMask(I, BitWidth, IsSignBit),
4139 Op0Known, 0))
4140 return &I;
4142 if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth),
4143 Op1Known, 0))
4144 return &I;
4146 // Given the known and unknown bits, compute a range that the LHS could be
4147 // in. Compute the Min, Max and RHS values based on the known bits. For the
4148 // EQ and NE we use unsigned values.
4149 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
4150 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
4151 if (I.isSigned()) {
4152 computeSignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
4153 computeSignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
4154 } else {
4155 computeUnsignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
4156 computeUnsignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
4157 }
4159 // If Min and Max are known to be the same, then SimplifyDemandedBits
4160 // figured out that the LHS is a constant. Constant fold this now, so that
4161 // code below can assume that Min != Max.
4162 if (!isa<Constant>(Op0) && Op0Min == Op0Max)
4163 return new ICmpInst(Pred, ConstantInt::get(Op0->getType(), Op0Min), Op1);
4164 if (!isa<Constant>(Op1) && Op1Min == Op1Max)
4165 return new ICmpInst(Pred, Op0, ConstantInt::get(Op1->getType(), Op1Min));
4167 // Based on the range information we know about the LHS, see if we can
4168 // simplify this comparison. For example, (x&4) < 8 is always true.
4169 switch (Pred) {
4170 default:
4171 llvm_unreachable("Unknown icmp opcode!");
4172 case ICmpInst::ICMP_EQ:
4173 case ICmpInst::ICMP_NE: {
4174 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) {
4175 return Pred == CmpInst::ICMP_EQ
4176 ? replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()))
4177 : replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4178 }
4180 // If all bits are known zero except for one, then we know at most one bit
4181 // is set. If the comparison is against zero, then this is a check to see if
4182 // *that* bit is set.
4183 APInt Op0KnownZeroInverted = ~Op0Known.Zero;
4184 if (Op1Known.isZero()) {
4185 // If the LHS is an AND with the same constant, look through it.
4186 Value *LHS = nullptr;
4187 const APInt *LHSC;
4188 if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
4189 *LHSC != Op0KnownZeroInverted)
4190 LHS = Op0;
4192 Value *X;
4193 if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
4194 APInt ValToCheck = Op0KnownZeroInverted;
4195 Type *XTy = X->getType();
4196 if (ValToCheck.isPowerOf2()) {
4197 // ((1 << X) & 8) == 0 -> X != 3
4198 // ((1 << X) & 8) != 0 -> X == 3
4199 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
4200 auto NewPred = ICmpInst::getInversePredicate(Pred);
4201 return new ICmpInst(NewPred, X, CmpC);
4202 } else if ((++ValToCheck).isPowerOf2()) {
4203 // ((1 << X) & 7) == 0 -> X >= 3
4204 // ((1 << X) & 7) != 0 -> X < 3
4205 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
4207 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
4208 return new ICmpInst(NewPred, X, CmpC);
4209 }
4210 }
4212 // Check if the LHS is 8 >>u x and the result is a power of 2 like 1.
4213 const APInt *CI;
4214 if (Op0KnownZeroInverted.isOneValue() &&
4215 match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
4216 // ((8 >>u X) & 1) == 0 -> X != 3
4217 // ((8 >>u X) & 1) != 0 -> X == 3
4218 unsigned CmpVal = CI->countTrailingZeros();
4219 auto NewPred = ICmpInst::getInversePredicate(Pred);
4220 return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
4221 }
4222 }
4223 break;
4224 }
4225 case ICmpInst::ICMP_ULT: {
4226 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
4227 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4228 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
4229 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4230 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
4231 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4233 const APInt *CmpC;
4234 if (match(Op1, m_APInt(CmpC))) {
4235 // A <u C -> A == C-1 if min(A)+1 == C
4236 if (Op1Max == Op0Min + 1) {
4237 Constant *CMinus1 = ConstantInt::get(Op0->getType(), *CmpC - 1);
4238 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, CMinus1);
4239 }
4240 }
4241 break;
4242 }
4243 case ICmpInst::ICMP_UGT: {
4244 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
4245 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4247 if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
4248 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4250 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
4251 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4253 const APInt *CmpC;
4254 if (match(Op1, m_APInt(CmpC))) {
4255 // A >u C -> A == C+1 if max(A)-1 == C
4256 if (*CmpC == Op0Max - 1)
4257 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4258 ConstantInt::get(Op1->getType(), *CmpC + 1));
4259 }
4260 break;
4261 }
4262 case ICmpInst::ICMP_SLT:
4263 if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
4264 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4265 if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
4266 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4267 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
4268 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4269 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
4270 if (Op1Max == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
4271 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4272 Builder.getInt(CI->getValue() - 1));
4273 }
4274 break;
4275 case ICmpInst::ICMP_SGT:
4276 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
4277 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4278 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
4279 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4281 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
4282 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4283 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
4284 if (Op1Min == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
4285 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4286 Builder.getInt(CI->getValue() + 1));
4287 }
4288 break;
4289 case ICmpInst::ICMP_SGE:
4290 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
4291 if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
4292 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4293 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
4294 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4295 break;
4296 case ICmpInst::ICMP_SLE:
4297 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
4298 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
4299 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4300 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
4301 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4302 break;
4303 case ICmpInst::ICMP_UGE:
4304 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
4305 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
4306 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4307 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
4308 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4309 break;
4310 case ICmpInst::ICMP_ULE:
4311 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
4312 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
4313 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4314 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
4315 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4316 break;
4317 }
4319 // Turn a signed comparison into an unsigned one if both operands are known to
4320 // have the same sign.
4321 if (I.isSigned() &&
4322 ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
4323 (Op0Known.One.isNegative() && Op1Known.One.isNegative())))
4324 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
4326 return nullptr;
4327 }
4329 /// If we have an icmp le or icmp ge instruction with a constant operand, turn
4330 /// it into the appropriate icmp lt or icmp gt instruction. This transform
4331 /// allows them to be folded in visitICmpInst.
4332 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
4333 ICmpInst::Predicate Pred = I.getPredicate();
4334 if (Pred != ICmpInst::ICMP_SLE && Pred != ICmpInst::ICMP_SGE &&
4335 Pred != ICmpInst::ICMP_ULE && Pred != ICmpInst::ICMP_UGE)
4336 return nullptr;
4338 Value *Op0 = I.getOperand(0);
4339 Value *Op1 = I.getOperand(1);
4340 auto *Op1C = dyn_cast<Constant>(Op1);
4341 if (!Op1C)
4342 return nullptr;
4344 // Check if the constant operand can be safely incremented/decremented without
4345 // overflowing/underflowing. For scalars, SimplifyICmpInst has already handled
4346 // the edge cases for us, so we just assert on them. For vectors, we must
4347 // handle the edge cases.
4348 Type *Op1Type = Op1->getType();
4349 bool IsSigned = I.isSigned();
4350 bool IsLE = (Pred == ICmpInst::ICMP_SLE || Pred == ICmpInst::ICMP_ULE);
4351 auto *CI = dyn_cast<ConstantInt>(Op1C);
4352 if (CI) {
4353 // A <= MAX -> TRUE ; A >= MIN -> TRUE
4354 assert(IsLE ? !CI->isMaxValue(IsSigned) : !CI->isMinValue(IsSigned));
4355 } else if (Op1Type->isVectorTy()) {
4356 // TODO? If the edge cases for vectors were guaranteed to be handled as they
4357 // are for scalar, we could remove the min/max checks. However, to do that,
4358 // we would have to use insertelement/shufflevector to replace edge values.
4359 unsigned NumElts = Op1Type->getVectorNumElements();
4360 for (unsigned i = 0; i != NumElts; ++i) {
4361 Constant *Elt = Op1C->getAggregateElement(i);
4362 if (!Elt)
4363 return nullptr;
4365 if (isa<UndefValue>(Elt))
4366 continue;
4368 // Bail out if we can't determine if this constant is min/max or if we
4369 // know that this constant is min/max.
4370 auto *CI = dyn_cast<ConstantInt>(Elt);
4371 if (!CI || (IsLE ? CI->isMaxValue(IsSigned) : CI->isMinValue(IsSigned)))
4372 return nullptr;
4373 }
4374 } else {
4375 // ConstantExpr?
4376 return nullptr;
4377 }
4379 // Increment or decrement the constant and set the new comparison predicate:
4380 // ULE -> ULT ; UGE -> UGT ; SLE -> SLT ; SGE -> SGT
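// For example (illustrative): icmp sle i32 %x, 5 becomes icmp slt i32 %x, 6,
// and icmp uge i32 %x, 4 becomes icmp ugt i32 %x, 3.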
4381 Constant *OneOrNegOne = ConstantInt::get(Op1Type, IsLE ? 1 : -1, true);
4382 CmpInst::Predicate NewPred = IsLE ? ICmpInst::ICMP_ULT: ICmpInst::ICMP_UGT;
4383 NewPred = IsSigned ? ICmpInst::getSignedPredicate(NewPred) : NewPred;
4384 return new ICmpInst(NewPred, Op0, ConstantExpr::getAdd(Op1C, OneOrNegOne));
4385 }
4387 /// Integer compare with boolean values can always be turned into bitwise ops.
4388 static Instruction *canonicalizeICmpBool(ICmpInst &I,
4389 InstCombiner::BuilderTy &Builder) {
4390 Value *A = I.getOperand(0), *B = I.getOperand(1);
4391 assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");
4393 // A boolean compared to true/false can be simplified to Op0/true/false in
4394 // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
4395 // Cases not handled by InstSimplify are always 'not' of Op0.
4396 if (match(B, m_Zero())) {
4397 switch (I.getPredicate()) {
4398 case CmpInst::ICMP_EQ: // A == 0 -> !A
4399 case CmpInst::ICMP_ULE: // A <=u 0 -> !A
4400 case CmpInst::ICMP_SGE: // A >=s 0 -> !A
4401 return BinaryOperator::CreateNot(A);
4402 default:
4403 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
4404 }
4405 } else if (match(B, m_One())) {
4406 switch (I.getPredicate()) {
4407 case CmpInst::ICMP_NE: // A != 1 -> !A
4408 case CmpInst::ICMP_ULT: // A <u 1 -> !A
4409 case CmpInst::ICMP_SGT: // A >s -1 -> !A
4410 return BinaryOperator::CreateNot(A);
4411 default:
4412 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
4413 }
4414 }
4416 switch (I.getPredicate()) {
4417 default:
4418 llvm_unreachable("Invalid icmp instruction!");
4419 case ICmpInst::ICMP_EQ:
4420 // icmp eq i1 A, B -> ~(A ^ B)
4421 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
4423 case ICmpInst::ICMP_NE:
4424 // icmp ne i1 A, B -> A ^ B
4425 return BinaryOperator::CreateXor(A, B);
4427 case ICmpInst::ICMP_UGT:
4428 // icmp ugt -> icmp ult
4429 std::swap(A, B);
4430 LLVM_FALLTHROUGH;
4431 case ICmpInst::ICMP_ULT:
4432 // icmp ult i1 A, B -> ~A & B
4433 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
4435 case ICmpInst::ICMP_SGT:
4436 // icmp sgt -> icmp slt
4437 std::swap(A, B);
4438 LLVM_FALLTHROUGH;
4439 case ICmpInst::ICMP_SLT:
4440 // icmp slt i1 A, B -> A & ~B
4441 return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
4443 case ICmpInst::ICMP_UGE:
4444 // icmp uge -> icmp ule
4445 std::swap(A, B);
4446 LLVM_FALLTHROUGH;
4447 case ICmpInst::ICMP_ULE:
4448 // icmp ule i1 A, B -> ~A | B
4449 return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
4451 case ICmpInst::ICMP_SGE:
4452 // icmp sge -> icmp sle
4453 std::swap(A, B);
4454 LLVM_FALLTHROUGH;
4455 case ICmpInst::ICMP_SLE:
4456 // icmp sle i1 A, B -> A | ~B
4457 return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
4458 }
4459 }
4461 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
4462 bool Changed = false;
4463 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4464 unsigned Op0Cplxity = getComplexity(Op0);
4465 unsigned Op1Cplxity = getComplexity(Op1);
4467 /// Orders the operands of the compare so that they are listed from most
4468 /// complex to least complex. This puts constants before unary operators,
4469 /// before binary operators.
4470 if (Op0Cplxity < Op1Cplxity ||
4471 (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) {
4472 I.swapOperands();
4473 std::swap(Op0, Op1);
4474 Changed = true;
4475 }
4477 if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1,
4478 SQ.getWithInstruction(&I)))
4479 return replaceInstUsesWith(I, V);
4481 // comparing -val or val with non-zero is the same as just comparing val
4482 // ie, abs(val) != 0 -> val != 0
4483 if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
4484 Value *Cond, *SelectTrue, *SelectFalse;
4485 if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
4486 m_Value(SelectFalse)))) {
4487 if (Value *V = dyn_castNegVal(SelectTrue)) {
4488 if (V == SelectFalse)
4489 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
4490 }
4491 else if (Value *V = dyn_castNegVal(SelectFalse)) {
4492 if (V == SelectTrue)
4493 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
4494 }
4495 }
4496 }
4498 if (Op0->getType()->isIntOrIntVectorTy(1))
4499 if (Instruction *Res = canonicalizeICmpBool(I, Builder))
4500 return Res;
4502 if (ICmpInst *NewICmp = canonicalizeCmpWithConstant(I))
4503 return NewICmp;
4505 if (Instruction *Res = foldICmpWithConstant(I))
4506 return Res;
4508 if (Instruction *Res = foldICmpUsingKnownBits(I))
4509 return Res;
4511 // Test if the ICmpInst instruction is used exclusively by a select as
4512 // part of a minimum or maximum operation. If so, refrain from doing
4513 // any other folding. This helps out other analyses which understand
4514 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
4515 // and CodeGen. And in this case, at least one of the comparison
4516 // operands has at least one user besides the compare (the select),
4517 // which would often largely negate the benefit of folding anyway.
4518 if (I.hasOneUse())
4519 if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
4520 if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
4521 (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
4522 return nullptr;
4524 // FIXME: We only do this after checking for min/max to prevent infinite
4525 // looping caused by a reverse canonicalization of these patterns for min/max.
4526 // FIXME: The organization of folds is a mess. These would naturally go into
4527 // canonicalizeCmpWithConstant(), but we can't move all of the above folds
4528 // down here after the min/max restriction.
4529 ICmpInst::Predicate Pred = I.getPredicate();
4530 const APInt *C;
4531 if (match(Op1, m_APInt(C))) {
4532 // For i32: x >u 2147483647 -> x <s 0 -> true if sign bit set
4533 if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
4534 Constant *Zero = Constant::getNullValue(Op0->getType());
4535 return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
4536 }
4538 // For i32: x <u 2147483648 -> x >s -1 -> true if sign bit clear
4539 if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
4540 Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
4541 return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
4542 }
4543 }
4545 if (Instruction *Res = foldICmpInstWithConstant(I))
4546 return Res;
4548 if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
4549 return Res;
4551 // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
4552 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
4553 if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
4554 return NI;
4555 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
4556 if (Instruction *NI = foldGEPICmp(GEP, Op0,
4557 ICmpInst::getSwappedPredicate(I.getPredicate()), I))
4558 return NI;
4560 // Try to optimize equality comparisons against alloca-based pointers.
4561 if (Op0->getType()->isPointerTy() && I.isEquality()) {
4562 assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
4563 if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op0, DL)))
4564 if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
4565 return New;
4566 if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op1, DL)))
4567 if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
4568 return New;
4569 }
4571 // Test to see if the operands of the icmp are casted versions of other
4572 // values. If the ptr->ptr cast can be stripped off both arguments, we do so
4573 // as well.
4574 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
4575 if (Op0->getType()->isPointerTy() &&
4576 (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
4577 // We keep moving the cast from the left operand over to the right
4578 // operand, where it can often be eliminated completely.
4579 Op0 = CI->getOperand(0);
4581 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
4582 // so eliminate it as well.
4583 if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
4584 Op1 = CI2->getOperand(0);
4586 // If Op1 is a constant, we can fold the cast into the constant.
4587 if (Op0->getType() != Op1->getType()) {
4588 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
4589 Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
4590 } else {
4591 // Otherwise, cast the RHS right before the icmp
4592 Op1 = Builder.CreateBitCast(Op1, Op0->getType());
4593 }
4594 }
4595 return new ICmpInst(I.getPredicate(), Op0, Op1);
4596 }
4597 }
4599 if (isa<CastInst>(Op0)) {
4600 // Handle the special case of: icmp (cast bool to X), <cst>
4601 // This comes up when you have code like
4602 //   int X = A < B;
4603 //   if (X) ...
4604 // For generality, we handle any zero-extension of any operand comparison
4605 // with a constant or another cast from the same type.
4606 if (isa<Constant>(Op1) || isa<CastInst>(Op1))
4607 if (Instruction *R = foldICmpWithCastAndCast(I))
4608 return R;
4609 }
4611 if (Instruction *Res = foldICmpBinOp(I))
4612 return Res;
4614 if (Instruction *Res = foldICmpWithMinMax(I))
4615 return Res;
4617 {
4618 Value *A, *B;
4619 // Transform (A & ~B) == 0 --> (A & B) != 0
4620 // and (A & ~B) != 0 --> (A & B) == 0
4621 // if A is a power of 2.
4622 if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
4623 match(Op1, m_Zero()) &&
4624 isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
4625 return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
4626 Op1);
4628 // ~X < ~Y --> Y < X
4629 // ~X < C --> X > ~C
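// For example (illustrative): with %nx = xor i32 %x, -1 (i.e. ~%x),
//   icmp ult i32 %nx, 42
// becomes
//   icmp ugt i32 %x, -43
// because ~X <u C holds exactly when X >u ~C (here ~42 == -43).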
4630 if (match(Op0, m_Not(m_Value(A)))) {
4631 if (match(Op1, m_Not(m_Value(B))))
4632 return new ICmpInst(I.getPredicate(), B, A);
4634 const APInt *C;
4635 if (match(Op1, m_APInt(C)))
4636 return new ICmpInst(I.getSwappedPredicate(), A,
4637 ConstantInt::get(Op1->getType(), ~(*C)));
4638 }
4640 Instruction *AddI = nullptr;
4641 if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
4642 m_Instruction(AddI))) &&
4643 isa<IntegerType>(A->getType())) {
4644 Value *Result;
4645 Constant *Overflow;
4646 if (OptimizeOverflowCheck(OCF_UNSIGNED_ADD, A, B, *AddI, Result,
4647 Overflow)) {
4648 replaceInstUsesWith(*AddI, Result);
4649 return replaceInstUsesWith(I, Overflow);
4650 }
4651 }
4653 // (zext a) * (zext b) --> llvm.umul.with.overflow.
4654 if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
4655 if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
4656 return R;
4657 }
4658 if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
4659 if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
4660 return R;
4661 }
4662 }
4664 if (Instruction *Res = foldICmpEquality(I))
4665 return Res;
4667 // The 'cmpxchg' instruction returns an aggregate containing the old value and
4668 // an i1 which indicates whether or not we successfully did the swap.
4670 // Replace comparisons between the old value and the expected value with the
4671 // indicator that 'cmpxchg' returns.
4673 // N.B. This transform is only valid when the 'cmpxchg' is not permitted to
4674 // spuriously fail. In those cases, the old value may equal the expected
4675 // value but it is possible for the swap to not occur.
4676 if (I.getPredicate() == ICmpInst::ICMP_EQ)
4677 if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
4678 if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
4679 if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
4680 !ACXI->isWeak())
4681 return ExtractValueInst::Create(ACXI, 1);
4683 {
4684 Value *X; ConstantInt *Cst;
4685 // icmp X+Cst, X
4686 if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
4687 return foldICmpAddOpConst(I, X, Cst, I.getPredicate());
4689 // icmp X, X+Cst
4690 if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
4691 return foldICmpAddOpConst(I, X, Cst, I.getSwappedPredicate());
4692 }
4693 return Changed ? &I : nullptr;
4694 }
4696 /// Fold fcmp ([us]itofp x, cst) if possible.
4697 Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
4698 Constant *RHSC) {
4699 if (!isa<ConstantFP>(RHSC)) return nullptr;
4700 const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
4702 // Get the width of the mantissa. We don't want to hack on conversions that
4703 // might lose information from the integer, e.g. "i64 -> float"
4704 int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
4705 if (MantissaWidth == -1) return nullptr; // Unknown.
4707 IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
4709 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
4711 if (I.isEquality()) {
4712 FCmpInst::Predicate P = I.getPredicate();
4713 bool IsExact = false;
4714 APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
4715 RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
4717 // If the floating point constant isn't an integer value, we know if we will
4718 // ever compare equal / not equal to it.
4719 if (!IsExact) {
4720 // TODO: Can never be -0.0 and other non-representable values
4721 APFloat RHSRoundInt(RHS);
4722 RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
4723 if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
4724 if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
4725 return replaceInstUsesWith(I, Builder.getFalse());
4727 assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
4728 return replaceInstUsesWith(I, Builder.getTrue());
4729 }
4730 }
4732 // TODO: If the constant is exactly representable, is it always OK to do
4733 // equality compares as integer?
4734 }
4736 // Check to see that the input is converted from an integer type that is small
4737 // enough to preserve all bits. TODO: check here for "known" sign bits.
4738 // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
4739 unsigned InputSize = IntTy->getScalarSizeInBits();
4741 // The following test does NOT adjust InputSize downwards for signed inputs,
4742 // because the most negative value still requires all the mantissa bits
4743 // to distinguish it from one less than that value.
4744 if ((int)InputSize > MantissaWidth) {
4745 // Conversion would lose accuracy. Check if loss can impact comparison.
4746 int Exp = ilogb(RHS);
4747 if (Exp == APFloat::IEK_Inf) {
4748 int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
4749 if (MaxExponent < (int)InputSize - !LHSUnsigned)
4750 // Conversion could create infinity.
4751 return nullptr;
4752 } else {
4753 // Note that if RHS is zero or NaN, then Exp is negative
4754 // and the first condition is trivially false.
4755 if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
4756 // Conversion could affect comparison.
4757 return nullptr;
4758 }
4759 }
4761 // Otherwise, we can potentially simplify the comparison. We know that it
4762 // will always come through as an integer value and we know the constant is
4763 // not a NAN (it would have been previously simplified).
4764 assert(!RHS.isNaN() && "NaN comparison not already folded!");
4766 ICmpInst::Predicate Pred;
4767 switch (I.getPredicate()) {
4768 default: llvm_unreachable("Unexpected predicate!");
4769 case FCmpInst::FCMP_UEQ:
4770 case FCmpInst::FCMP_OEQ:
4771 Pred = ICmpInst::ICMP_EQ;
4772 break;
4773 case FCmpInst::FCMP_UGT:
4774 case FCmpInst::FCMP_OGT:
4775 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
4776 break;
4777 case FCmpInst::FCMP_UGE:
4778 case FCmpInst::FCMP_OGE:
4779 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
4780 break;
4781 case FCmpInst::FCMP_ULT:
4782 case FCmpInst::FCMP_OLT:
4783 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
4784 break;
4785 case FCmpInst::FCMP_ULE:
4786 case FCmpInst::FCMP_OLE:
4787 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
4788 break;
4789 case FCmpInst::FCMP_UNE:
4790 case FCmpInst::FCMP_ONE:
4791 Pred = ICmpInst::ICMP_NE;
4792 break;
4793 case FCmpInst::FCMP_ORD:
4794 return replaceInstUsesWith(I, Builder.getTrue());
4795 case FCmpInst::FCMP_UNO:
4796 return replaceInstUsesWith(I, Builder.getFalse());
4797 }
4799 // Now we know that the APFloat is a normal number, zero or inf.
4801 // See if the FP constant is too large for the integer. For example,
4802 // comparing an i8 to 300.0.
4803 unsigned IntWidth = IntTy->getScalarSizeInBits();
4805 if (!LHSUnsigned) {
  if (!LHSUnsigned) {
    // If the RHS value is > SignedMax, fold the comparison. This handles +INF
    // and large values.
    APFloat SMax(RHS.getSemantics());
    SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
          Pred == ICmpInst::ICMP_SLE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  } else {
    // If the RHS value is > UnsignedMax, fold the comparison. This handles
    // +INF and large values.
    APFloat UMax(RHS.getSemantics());
    UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
          Pred == ICmpInst::ICMP_ULE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  }
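  // Worked example (illustrative): for i8, SignedMax is 127, so
  // 'fcmp olt (sitofp i8 %x), 300.0' folds to true and 'fcmp ogt' folds to
  // false; UnsignedMax (255) covers the uitofp side the same way.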
  if (!LHSUnsigned) {
    // See if the RHS value is < SignedMin.
    APFloat SMin(RHS.getSemantics());
    SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
          Pred == ICmpInst::ICMP_SGE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  } else {
    // See if the RHS value is < UnsignedMin (i.e. negative).
    APFloat UMin(RHS.getSemantics());
    UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
          Pred == ICmpInst::ICMP_UGE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  }
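  // Worked example (illustrative): for i8, SignedMin is -128, so
  // 'fcmp ogt (sitofp i8 %x), -300.0' folds to true and 'fcmp olt' to false.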
  // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
  // [0, UMAX], but it may still be fractional. Check whether it is fractional
  // by casting the FP value to the integer value and back, checking for
  // equality. Don't do this for zero, because -0.0 is not fractional.
  Constant *RHSInt = LHSUnsigned
    ? ConstantExpr::getFPToUI(RHSC, IntTy)
    : ConstantExpr::getFPToSI(RHSC, IntTy);
  if (!RHS.isZero()) {
    bool Equal = LHSUnsigned
      ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
      : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
    if (!Equal) {
      // If we had a comparison against a fractional value, we have to adjust
      // the compare predicate and sometimes the value. RHSC is rounded towards
      // zero at this point.
      switch (Pred) {
      default: llvm_unreachable("Unexpected integer comparison!");
      case ICmpInst::ICMP_NE:  // (float)int != 4.4   --> true
        return replaceInstUsesWith(I, Builder.getTrue());
      case ICmpInst::ICMP_EQ:  // (float)int == 4.4   --> false
        return replaceInstUsesWith(I, Builder.getFalse());
      case ICmpInst::ICMP_ULE:
        // (float)int <= 4.4   --> int <= 4
        // (float)int <= -4.4  --> false
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getFalse());
        break;
      case ICmpInst::ICMP_SLE:
        // (float)int <= 4.4   --> int <= 4
        // (float)int <= -4.4  --> int < -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SLT;
        break;
      case ICmpInst::ICMP_ULT:
        // (float)int < -4.4   --> false
        // (float)int < 4.4    --> int <= 4
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getFalse());
        Pred = ICmpInst::ICMP_ULE;
        break;
      case ICmpInst::ICMP_SLT:
        // (float)int < -4.4   --> int < -4
        // (float)int < 4.4    --> int <= 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SLE;
        break;
      case ICmpInst::ICMP_UGT:
        // (float)int > 4.4    --> int > 4
        // (float)int > -4.4   --> true
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getTrue());
        break;
      case ICmpInst::ICMP_SGT:
        // (float)int > 4.4    --> int > 4
        // (float)int > -4.4   --> int >= -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SGE;
        break;
      case ICmpInst::ICMP_UGE:
        // (float)int >= -4.4  --> true
        // (float)int >= 4.4   --> int > 4
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getTrue());
        Pred = ICmpInst::ICMP_UGT;
        break;
      case ICmpInst::ICMP_SGE:
        // (float)int >= -4.4  --> int >= -4
        // (float)int >= 4.4   --> int > 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SGT;
        break;
      }
    }
  }
  // Lower this FP comparison into an appropriate integer version of the
  // comparison.
  return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
}
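// An end-to-end example of the fold above (illustrative; names hypothetical):
//   %f = sitofp i32 %x to double
//   %c = fcmp olt double %f, 4.4
// rounds the constant toward zero and adjusts the predicate (SLT -> SLE):
//   %c = icmp sle i32 %x, 4
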
Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
  bool Changed = false;

  // Order the operands of the compare so that they are listed from most
  // complex to least complex. This puts constants before unary operators and
  // unary operators before binary operators.
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
    I.swapOperands();
    Changed = true;
  }

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1,
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);
  // Simplify 'fcmp pred X, X'.
  if (Op0 == Op1) {
    switch (I.getPredicate()) {
    default: llvm_unreachable("Unknown predicate!");
    case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
    case FCmpInst::FCMP_ULT:    // True if unordered or less than
    case FCmpInst::FCMP_UGT:    // True if unordered or greater than
    case FCmpInst::FCMP_UNE:    // True if unordered or not equal
      // Canonicalize these to be 'fcmp uno %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_UNO);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;

    case FCmpInst::FCMP_ORD:    // True if ordered (no nans)
    case FCmpInst::FCMP_OEQ:    // True if ordered and equal
    case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
    case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
      // Canonicalize these to be 'fcmp ord %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_ORD);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;
    }
  }
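  // E.g. (illustrative): 'fcmp ult %x, %x' is true exactly when %x is NaN,
  // so it becomes 'fcmp uno %x, 0.0'; dually, 'fcmp oge %x, %x' is true
  // exactly when %x is not NaN and becomes 'fcmp ord %x, 0.0'.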
  // Test if the FCmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  // and CodeGen. And in this case, at least one of the comparison
  // operands has at least one user besides the compare (the select),
  // which would often largely negate the benefit of folding anyway.
  if (I.hasOneUse())
    if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
      if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
          (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
        return nullptr;
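  // E.g. (illustrative): in the float-min idiom
  //   %c = fcmp olt float %a, %b
  //   %m = select i1 %c, float %a, float %b
  // rewriting %c would obscure the min pattern from later analyses.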
  // Handle fcmp with constant RHS.
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::FPExt: {
        // fcmp (fpext x), C -> fcmp x, (fptrunc C) if fptrunc is lossless
        FPExtInst *LHSExt = cast<FPExtInst>(LHSI);
        ConstantFP *RHSF = dyn_cast<ConstantFP>(RHSC);
        if (!RHSF)
          break;

        const fltSemantics *Sem;
        // FIXME: This shouldn't be here.
        if (LHSExt->getSrcTy()->isHalfTy())
          Sem = &APFloat::IEEEhalf();
        else if (LHSExt->getSrcTy()->isFloatTy())
          Sem = &APFloat::IEEEsingle();
        else if (LHSExt->getSrcTy()->isDoubleTy())
          Sem = &APFloat::IEEEdouble();
        else if (LHSExt->getSrcTy()->isFP128Ty())
          Sem = &APFloat::IEEEquad();
        else if (LHSExt->getSrcTy()->isX86_FP80Ty())
          Sem = &APFloat::x87DoubleExtended();
        else if (LHSExt->getSrcTy()->isPPC_FP128Ty())
          Sem = &APFloat::PPCDoubleDouble();
        else
          break;

        bool Lossy;
        APFloat F = RHSF->getValueAPF();
        F.convert(*Sem, APFloat::rmNearestTiesToEven, &Lossy);

        // Avoid lossy conversions and denormals.
        // Zero is a special case that's OK to convert.
        APFloat Fabs = F;
        Fabs.clearSign();
        if (!Lossy &&
            ((Fabs.compare(APFloat::getSmallestNormalized(*Sem)) !=
              APFloat::cmpLessThan) || Fabs.isZero()))
          return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
                              ConstantFP::get(RHSC->getContext(), F));
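        // E.g. (illustrative): 'fcmp olt (fpext half %x to float), 1.0'
        // becomes 'fcmp olt half %x, 0xH3C00', since 1.0 converts to half
        // exactly and is not a denormal.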
        break;
      }
      case Instruction::PHI:
        // Only fold fcmp into the PHI if the phi and fcmp are in the same
        // block. If in the same block, we're encouraging jump threading. If
        // not, we are just pessimizing the code by making an i1 phi.
        if (LHSI->getParent() == I.getParent())
          if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
            return NV;
        break;
      case Instruction::SIToFP:
      case Instruction::UIToFP:
        if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
          return NV;
        break;
      case Instruction::FSub: {
        // fcmp pred (fneg x), C -> fcmp swap(pred) x, -C
        Value *Op;
        if (match(LHSI, m_FNeg(m_Value(Op))))
          return new FCmpInst(I.getSwappedPredicate(), Op,
                              ConstantExpr::getFNeg(RHSC));
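        // E.g. (illustrative): 'fcmp ogt (fsub float -0.0, %x), 4.0' becomes
        // 'fcmp olt float %x, -4.0' (swapped predicate, negated constant).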
        break;
      }
      case Instruction::Load:
        if (GetElementPtrInst *GEP =
                dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
            if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
                !cast<LoadInst>(LHSI)->isVolatile())
              if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
                return Res;
        }
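        // E.g. (illustrative; @table hypothetical): an fcmp against a load
        // from a constant global array, such as
        //   fcmp oeq (load (gep @table, 0, %i)), 1.0
        // can often be rewritten as an integer comparison on the index %i.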
        break;
      case Instruction::Call: {
        if (!RHSC->isNullValue())
          break;

        CallInst *CI = cast<CallInst>(LHSI);
        Intrinsic::ID IID = getIntrinsicForCallSite(CI, &TLI);
        if (IID != Intrinsic::fabs)
          break;

        // Various optimizations for fabs compared with zero.
        switch (I.getPredicate()) {
        default:
          break;
        // fabs(x) < 0 --> false
        case FCmpInst::FCMP_OLT:
          llvm_unreachable("handled by SimplifyFCmpInst");
        // fabs(x) > 0 --> x != 0
        case FCmpInst::FCMP_OGT:
          return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0), RHSC);
        // fabs(x) <= 0 --> x == 0
        case FCmpInst::FCMP_OLE:
          return new FCmpInst(FCmpInst::FCMP_OEQ, CI->getArgOperand(0), RHSC);
        // fabs(x) >= 0 --> !isnan(x)
        case FCmpInst::FCMP_OGE:
          return new FCmpInst(FCmpInst::FCMP_ORD, CI->getArgOperand(0), RHSC);
        // fabs(x) == 0 --> x == 0
        // fabs(x) != 0 --> x != 0
        case FCmpInst::FCMP_OEQ:
        case FCmpInst::FCMP_UEQ:
        case FCmpInst::FCMP_ONE:
        case FCmpInst::FCMP_UNE:
          return new FCmpInst(I.getPredicate(), CI->getArgOperand(0), RHSC);
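        // E.g. (illustrative): 'fcmp ole (call float @llvm.fabs.f32(float %x)),
        // 0.0' becomes 'fcmp oeq float %x, 0.0': |x| <= 0 holds only at zero,
        // and both forms are false when %x is NaN.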
        }
        break;
      }
      }
  }

  // fcmp pred (fneg x), (fneg y) -> fcmp swap(pred) x, y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return new FCmpInst(I.getSwappedPredicate(), X, Y);
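  // E.g. (illustrative): 'fcmp ogt (fneg %x), (fneg %y)' becomes
  // 'fcmp olt %x, %y'.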

  // fcmp (fpext x), (fpext y) -> fcmp x, y
  if (FPExtInst *LHSExt = dyn_cast<FPExtInst>(Op0))
    if (FPExtInst *RHSExt = dyn_cast<FPExtInst>(Op1))
      if (LHSExt->getSrcTy() == RHSExt->getSrcTy())
        return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
                            RHSExt->getOperand(0));
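  // E.g. (illustrative): 'fcmp ogt (fpext half %a), (fpext half %b)' becomes
  // 'fcmp ogt half %a, %b'; fpext is exact, so the result is unchanged.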

  return Changed ? &I : nullptr;
}