//===- InstCombineCompares.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitICmp and visitFCmp functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

// How many times is a select replaced by one of its operands?
STATISTIC(NumSel, "Number of select opts");

/// Compute Result = In1+In2, returning true if the result overflowed for this
/// type.
static bool addWithOverflow(APInt &Result, const APInt &In1,
                            const APInt &In2, bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.sadd_ov(In2, Overflow);
  else
    Result = In1.uadd_ov(In2, Overflow);
  return Overflow;
}

/// Compute Result = In1-In2, returning true if the result overflowed for this
/// type.
static bool subWithOverflow(APInt &Result, const APInt &In1,
                            const APInt &In2, bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.ssub_ov(In2, Overflow);
  else
    Result = In1.usub_ov(In2, Overflow);
  return Overflow;
}

/// Given an icmp instruction, return true if any use of this comparison is a
/// branch on sign bit comparison.
static bool hasBranchUse(ICmpInst &I) {
  for (auto *U : I.users())
    if (isa<BranchInst>(U))
      return true;
  return false;
}

/// Given an exploded icmp instruction, return true if the comparison only
/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
/// the result of the comparison is true when the input value is signed.
static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                           bool &TrueIfSigned) {
  switch (Pred) {
  case ICmpInst::ICMP_SLT: // True if LHS s< 0
    TrueIfSigned = true;
    return RHS.isNullValue();
  case ICmpInst::ICMP_SLE: // True if LHS s<= RHS and RHS == -1
    TrueIfSigned = true;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_SGT: // True if LHS s> -1
    TrueIfSigned = false;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_UGT:
    // True if LHS u> RHS and RHS == high-bit-mask - 1
    TrueIfSigned = true;
    return RHS.isMaxSignedValue();
  case ICmpInst::ICMP_UGE:
    // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
    TrueIfSigned = true;
    return RHS.isSignMask();
  default:
    return false;
  }
}

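// Illustrative sketch (editorial note, not from the original source; values
// assume i8): the predicates accepted above correspond to IR such as
//   icmp slt i8 %x, 0      ; true iff the sign bit is set  (TrueIfSigned)
//   icmp sgt i8 %x, -1     ; true iff the sign bit is clear
//   icmp ugt i8 %x, 127    ; true iff the sign bit is set
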
/// Returns true if the exploded icmp can be expressed as a signed comparison
/// to zero and updates the predicate accordingly.
/// The signedness of the comparison is preserved.
/// TODO: Refactor with decomposeBitTestICmp()?
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
  if (!ICmpInst::isSigned(Pred))
    return false;

  if (C.isNullValue())
    return ICmpInst::isRelational(Pred);

  if (C.isOneValue()) {
    if (Pred == ICmpInst::ICMP_SLT) {
      Pred = ICmpInst::ICMP_SLE;
      return true;
    }
  } else if (C.isAllOnesValue()) {
    if (Pred == ICmpInst::ICMP_SGT) {
      Pred = ICmpInst::ICMP_SGE;
      return true;
    }
  }

  return false;
}

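// Illustrative sketch (editorial note, not from the original source): for
// example, "icmp slt X, 1" is rewritten as the sign test "icmp sle X, 0",
// and "icmp sgt X, -1" as "icmp sge X, 0"; both now compare against zero
// while preserving signedness.
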
/// Given a signed integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeSignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                   APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero | Known.One);

  // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  // bit if it is unknown.
  Min = Known.One;
  Max = Known.One | UnknownBits;

  if (UnknownBits.isNegative()) { // Sign bit is unknown
    Min.setSignBit();
    Max.clearSignBit();
  }
}

/// Given an unsigned integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeUnsignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                     APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero | Known.One);

  // The minimum value is when the unknown bits are all zeros.
  Min = Known.One;
  // The maximum value is when the unknown bits are all ones.
  Max = Known.One | UnknownBits;
}

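// Worked example (editorial note, not from the original source): for an i4
// value with Known.Zero = 0b1000 and Known.One = 0b0001, the unknown bits are
// 0b0110, so the unsigned range is Min = 0b0001 (1) to Max = 0b0111 (7).
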
/// This is called when we see this pattern:
///   cmp pred (load (gep GV, ...)), cmpcst
/// where GV is a global variable with a constant initializer. Try to simplify
/// this into some simple computation that does not need the load. For example
/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
///
/// If AndCst is non-null, then the loaded value is masked with that constant
/// before doing the comparison. This handles cases like "A[i]&4 == 0".
Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                                        GlobalVariable *GV,
                                                        ICmpInst &ICI,
                                                        ConstantInt *AndCst) {
  Constant *Init = GV->getInitializer();
  if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
    return nullptr;

  uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
  // Don't blow up on huge arrays.
  if (ArrayElementCount > MaxArraySizeForCombine)
    return nullptr;

  // There are many forms of this optimization we can handle, for now, just do
  // the simple index into a single-dimensional array.
  //
  // Require: GEP GV, 0, i {{, constant indices}}
  if (GEP->getNumOperands() < 3 ||
      !isa<ConstantInt>(GEP->getOperand(1)) ||
      !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
      isa<Constant>(GEP->getOperand(2)))
    return nullptr;

  // Check that indices after the variable are constants and in-range for the
  // type they index. Collect the indices. This is typically for arrays of
  // structs.
  SmallVector<unsigned, 4> LaterIndices;

  Type *EltTy = Init->getType()->getArrayElementType();
  for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
    ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Idx) return nullptr; // Variable index.

    uint64_t IdxVal = Idx->getZExtValue();
    if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.

    if (StructType *STy = dyn_cast<StructType>(EltTy))
      EltTy = STy->getElementType(IdxVal);
    else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
      if (IdxVal >= ATy->getNumElements()) return nullptr;
      EltTy = ATy->getElementType();
    } else {
      return nullptr; // Unknown type.
    }

    LaterIndices.push_back(IdxVal);
  }

  enum { Overdefined = -3, Undefined = -2 };

  // Variables for our state machines.

  // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
  // "i == 47 | i == 87", where 47 is the first index the condition is true for,
  // and 87 is the second (and last) index. FirstTrueElement is -2 when
  // undefined, otherwise set to the first true element. SecondTrueElement is
  // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
  int FirstTrueElement = Undefined, SecondTrueElement = Undefined;

  // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
  // form "i != 47 & i != 87". Same state transitions as for true elements.
  int FirstFalseElement = Undefined, SecondFalseElement = Undefined;

  /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
  /// define a state machine that triggers for ranges of values that the index
  /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
  /// This is -2 when undefined, -3 when overdefined, and otherwise the last
  /// index in the range (inclusive). We use -2 for undefined here because we
  /// use relative comparisons and don't want 0-1 to match -1.
  int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;

  // MagicBitvector - This is a magic bitvector where we set a bit if the
  // comparison is true for element 'i'. If there are 64 elements or less in
  // the array, this will fully represent all the comparison results.
  uint64_t MagicBitvector = 0;

  // Scan the array and see if one of our patterns matches.
  Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
  for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
    Constant *Elt = Init->getAggregateElement(i);
    if (!Elt) return nullptr;

    // If this is indexing an array of structures, get the structure element.
    if (!LaterIndices.empty())
      Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);

    // If the element is masked, handle it.
    if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);

    // Find out if the comparison would be true or false for the i'th element.
    Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
                                                  CompareRHS, DL, &TLI);
    // If the result is undef for this element, ignore it.
    if (isa<UndefValue>(C)) {
      // Extend range state machines to cover this element in case there is an
      // undef in the middle of the range.
      if (TrueRangeEnd == (int)i-1)
        TrueRangeEnd = i;
      if (FalseRangeEnd == (int)i-1)
        FalseRangeEnd = i;
      continue;
    }

    // If we can't compute the result for any of the elements, we have to give
    // up evaluating the entire conditional.
    if (!isa<ConstantInt>(C)) return nullptr;

    // Otherwise, we know if the comparison is true or false for this element,
    // update our state machines.
    bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();

    // State machine for single/double/range index comparison.
    if (IsTrueForElt) {
      // Update the TrueElement state machine.
      if (FirstTrueElement == Undefined)
        FirstTrueElement = TrueRangeEnd = i; // First true element.
      else {
        // Update double-compare state machine.
        if (SecondTrueElement == Undefined)
          SecondTrueElement = i;
        else
          SecondTrueElement = Overdefined;

        // Update range state machine.
        if (TrueRangeEnd == (int)i-1)
          TrueRangeEnd = i;
        else
          TrueRangeEnd = Overdefined;
      }
    } else {
      // Update the FalseElement state machine.
      if (FirstFalseElement == Undefined)
        FirstFalseElement = FalseRangeEnd = i; // First false element.
      else {
        // Update double-compare state machine.
        if (SecondFalseElement == Undefined)
          SecondFalseElement = i;
        else
          SecondFalseElement = Overdefined;

        // Update range state machine.
        if (FalseRangeEnd == (int)i-1)
          FalseRangeEnd = i;
        else
          FalseRangeEnd = Overdefined;
      }
    }

    // If this element is in range, update our magic bitvector.
    if (i < 64 && IsTrueForElt)
      MagicBitvector |= 1ULL << i;

    // If all of our states become overdefined, bail out early. Since the
    // predicate is expensive, only check it every 8 elements. This is only
    // really useful for really huge arrays.
    if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
        SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
        FalseRangeEnd == Overdefined)
      return nullptr;
  }

  // Now that we've scanned the entire array, emit our new comparison(s). We
  // order the state machines in complexity of the generated code.
  Value *Idx = GEP->getOperand(2);

  // If the index is larger than the pointer size of the target, truncate the
  // index down like the GEP would do implicitly. We don't have to do this for
  // an inbounds GEP because the index can't be out of range.
  if (!GEP->isInBounds()) {
    Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
    unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
    if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
      Idx = Builder.CreateTrunc(Idx, IntPtrTy);
  }

  // If the comparison is only true for one or two elements, emit direct
  // comparisons.
  if (SecondTrueElement != Overdefined) {
    // None true -> false.
    if (FirstTrueElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getFalse());

    Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);

    // True for one element -> 'i == 47'.
    if (SecondTrueElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);

    // True for two elements -> 'i == 47 | i == 72'.
    Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
    Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
    Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
    return BinaryOperator::CreateOr(C1, C2);
  }

  // If the comparison is only false for one or two elements, emit direct
  // comparisons.
  if (SecondFalseElement != Overdefined) {
    // None false -> true.
    if (FirstFalseElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getTrue());

    Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);

    // False for one element -> 'i != 47'.
    if (SecondFalseElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);

    // False for two elements -> 'i != 47 & i != 72'.
    Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
    Value *SecondFalseIdx = ConstantInt::get(Idx->getType(), SecondFalseElement);
    Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
    return BinaryOperator::CreateAnd(C1, C2);
  }

  // If the comparison can be replaced with a range comparison for the elements
  // where it is true, emit the range check.
  if (TrueRangeEnd != Overdefined) {
    assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");

    // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
    if (FirstTrueElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  TrueRangeEnd - FirstTrueElement + 1);
    return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
  }

  // False range check.
  if (FalseRangeEnd != Overdefined) {
    assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
    // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
    if (FirstFalseElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  FalseRangeEnd - FirstFalseElement);
    return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
  }

  // If a magic bitvector captures the entire comparison state
  // of this load, replace it with computation that does:
  //   ((magic_cst >> i) & 1) != 0
  {
    Type *Ty = nullptr;

    // Look for an appropriate type:
    // - The type of Idx if the magic fits
    // - The smallest fitting legal type
    if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
      Ty = Idx->getType();
    else
      Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);

    if (Ty) {
      Value *V = Builder.CreateIntCast(Idx, Ty, false);
      V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
      V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
      return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
    }
  }

  return nullptr;
}

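// Illustrative example of the fold above (editorial sketch; the global and
// value names are hypothetical, not taken from a real test):
//   @foo = internal constant [4 x i8] c"\01\02\03\00"
//   %p = getelementptr inbounds [4 x i8], [4 x i8]* @foo, i64 0, i64 %i
//   %v = load i8, i8* %p
//   %c = icmp eq i8 %v, 0
// Only element 3 compares equal, so this simplifies to "icmp eq i64 %i, 3".
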
/// Return a value that can be used to compare the *offset* implied by a GEP to
/// zero. For example, if we have &A[i], we want to return 'i' for
/// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
/// are involved. The above expression would also be legal to codegen as
/// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This latter form is less amenable to optimization though, and we are allowed
/// to generate the first by knowing that pointer arithmetic doesn't overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
                                          const DataLayout &DL) {
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index. If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index. For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size * CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return nullptr;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element. For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices. If so, emit the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return nullptr;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size * CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index. If there is no offset, life is simple, return
  // the variable.
  Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
  unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
  if (Offset == 0) {
    // Cast to intptrty in case a truncation occurs. If an extension is needed,
    // we don't need to bother extending: the extension won't affect where the
    // computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
      VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
    }
    return VariableIdx;
  }

  // Otherwise, there is an index. The computation we will do will be modulo
  // the pointer size, so get it.
  uint64_t PtrSizeMask = ~0ULL >> (64 - IntPtrWidth);

  Offset &= PtrSizeMask;
  VariableScale &= PtrSizeMask;

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs * (int64_t)VariableScale)
    return nullptr;

  // Okay, we can do this evaluation. Start by converting the index to intptr.
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy,
                                           /*isSigned=*/true);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset");
}

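// Worked example (editorial note, not from the original source): for a GEP
// whose byte offset is "12 + i*4" (e.g. &A[3 + i] with i32 elements), the
// offset crosses zero exactly where "3 + i" does, so the value returned is
// "add i, 3" and the caller compares that against zero instead.
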
/// Returns true if we can rewrite Start as a GEP with pointer Base
/// and some integer offset. The nodes that need to be re-written
/// for this transformation will be added to Explored.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
                                  const DataLayout &DL,
                                  SetVector<Value *> &Explored) {
  SmallVector<Value *, 16> WorkList(1, Start);
  Explored.insert(Base);

  // The following traversal gives us an order which can be used
  // when doing the final transformation. Since in the final
  // transformation we create the PHI replacement instructions first,
  // we don't have to get them in any particular order.
  //
  // However, for other instructions we will have to traverse the
  // operands of an instruction first, which means that we have to
  // do a post-order traversal.
  while (!WorkList.empty()) {
    SetVector<PHINode *> PHIs;

    while (!WorkList.empty()) {
      if (Explored.size() >= 100)
        return false;

      Value *V = WorkList.back();

      if (Explored.count(V) != 0) {
        WorkList.pop_back();
        continue;
      }

      if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
          !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
        // We've found some value that we can't explore which is different from
        // the base. Therefore we can't do this transformation.
        return false;

      if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
        auto *CI = cast<CastInst>(V);
        if (!CI->isNoopCast(DL))
          return false;

        if (Explored.count(CI->getOperand(0)) == 0)
          WorkList.push_back(CI->getOperand(0));
      }

      if (auto *GEP = dyn_cast<GEPOperator>(V)) {
        // We're limiting the GEP to having one index. This will preserve
        // the original pointer type. We could handle more cases in the
        // future.
        if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
            GEP->getType() != Start->getType())
          return false;

        if (Explored.count(GEP->getOperand(0)) == 0)
          WorkList.push_back(GEP->getOperand(0));
      }

      if (WorkList.back() == V) {
        WorkList.pop_back();
        // We've finished visiting this node, mark it as such.
        Explored.insert(V);
      }

      if (auto *PN = dyn_cast<PHINode>(V)) {
        // We cannot transform PHIs on unsplittable basic blocks.
        if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
          return false;
        Explored.insert(PN);
        PHIs.insert(PN);
      }
    }

    // Explore the PHI nodes further.
    for (auto *PN : PHIs)
      for (Value *Op : PN->incoming_values())
        if (Explored.count(Op) == 0)
          WorkList.push_back(Op);
  }

  // Make sure that we can do this. Since we can't insert GEPs in a basic
  // block before a PHI node, we can't easily do this transformation if
  // we have PHI node users of transformed instructions.
  for (Value *Val : Explored) {
    for (User *U : Val->users()) {
      auto *PHI = dyn_cast<PHINode>(U);
      auto *Inst = dyn_cast<Instruction>(Val);

      if (Inst == Base || Inst == PHI || !Inst || !PHI ||
          Explored.count(PHI) == 0)
        continue;

      if (PHI->getParent() == Inst->getParent())
        return false;
    }
  }
  return true;
}

// Sets the appropriate insert point on Builder where we can add
// a replacement Instruction for V (if that is possible).
static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
                              bool Before = true) {
  if (auto *PHI = dyn_cast<PHINode>(V)) {
    Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
    return;
  }
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (!Before)
      I = &*std::next(I->getIterator());
    Builder.SetInsertPoint(I);
    return;
  }
  if (auto *A = dyn_cast<Argument>(V)) {
    // Set the insertion point in the entry block.
    BasicBlock &Entry = A->getParent()->getEntryBlock();
    Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
    return;
  }
  // Otherwise, this is a constant and we don't need to set a new
  // insertion point.
  assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
}

/// Returns a re-written value of Start as an indexed GEP using Base as a
/// pointer.
static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
                                 const DataLayout &DL,
                                 SetVector<Value *> &Explored) {
  // Perform all the substitutions. This is a bit tricky because we can
  // have cycles in our use-def chains.
  // 1. Create the PHI nodes without any incoming values.
  // 2. Create all the other values.
  // 3. Add the edges for the PHI nodes.
  // 4. Emit GEPs to get the original pointers.
  // 5. Remove the original instructions.
  Type *IndexType = IntegerType::get(
      Base->getContext(), DL.getPointerTypeSizeInBits(Start->getType()));

  DenseMap<Value *, Value *> NewInsts;
  NewInsts[Base] = ConstantInt::getNullValue(IndexType);

  // Create the new PHI nodes, without adding any incoming values.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // Create empty phi nodes. This avoids cyclic dependencies when creating
    // the remaining instructions.
    if (auto *PHI = dyn_cast<PHINode>(Val))
      NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
                                      PHI->getName() + ".idx", PHI);
  }
  IRBuilder<> Builder(Base->getContext());

  // Create all the other instructions.
  for (Value *Val : Explored) {
    if (NewInsts.find(Val) != NewInsts.end())
      continue;

    if (auto *CI = dyn_cast<CastInst>(Val)) {
      NewInsts[CI] = NewInsts[CI->getOperand(0)];
      continue;
    }
    if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
      Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
                                                  : GEP->getOperand(1);
      setInsertionPoint(Builder, GEP);
      // Indices might need to be sign extended. GEPs will magically do
      // this, but we need to do it ourselves here.
      if (Index->getType()->getScalarSizeInBits() !=
          NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
        Index = Builder.CreateSExtOrTrunc(
            Index, NewInsts[GEP->getOperand(0)]->getType(),
            GEP->getOperand(0)->getName() + ".sext");
      }

      auto *Op = NewInsts[GEP->getOperand(0)];
      if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
        NewInsts[GEP] = Index;
      else
        NewInsts[GEP] = Builder.CreateNSWAdd(
            Op, Index, GEP->getOperand(0)->getName() + ".add");
      continue;
    }
    if (isa<PHINode>(Val))
      continue;

    llvm_unreachable("Unexpected instruction type");
  }

  // Add the incoming values to the PHI nodes.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // All the instructions have been created, we can now add edges to the
    // phi nodes.
    if (auto *PHI = dyn_cast<PHINode>(Val)) {
      PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
      for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
        Value *NewIncoming = PHI->getIncomingValue(I);

        if (NewInsts.find(NewIncoming) != NewInsts.end())
          NewIncoming = NewInsts[NewIncoming];

        NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
      }
    }
  }

  for (Value *Val : Explored) {
    if (Val == Base)
      continue;

    // Depending on the type, for external users we have to emit
    // a GEP or a GEP + ptrtoint.
    setInsertionPoint(Builder, Val, false);

    // If required, create an inttoptr instruction for Base.
    Value *NewBase = Base;
    if (!Base->getType()->isPointerTy())
      NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
                                               Start->getName() + "to.ptr");

    Value *GEP = Builder.CreateInBoundsGEP(
        Start->getType()->getPointerElementType(), NewBase,
        makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");

    if (!Val->getType()->isPointerTy()) {
      Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
                                              Val->getName() + ".conv");
      GEP = Cast;
    }
    Val->replaceAllUsesWith(GEP);
  }

  return NewInsts[Start];
}

/// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
/// the input Value as a constant indexed GEP. Returns a pair containing
/// the GEPs Pointer and Index.
static std::pair<Value *, Value *>
getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
  Type *IndexType = IntegerType::get(V->getContext(),
                                     DL.getPointerTypeSizeInBits(V->getType()));

  Constant *Index = ConstantInt::getNullValue(IndexType);
  while (true) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      // We accept only inbounds GEPs here to exclude the possibility of
      // overflow.
      if (!GEP->isInBounds())
        break;
      if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
          GEP->getType() == V->getType()) {
        V = GEP->getOperand(0);
        Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
        Index = ConstantExpr::getAdd(
            Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType));
        continue;
      }
      break;
    }
    if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    break;
  }

  return {V, Index};
}

/// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
/// We can look through PHIs, GEPs and casts in order to determine a common base
/// between GEPLHS and RHS.
static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
                                              ICmpInst::Predicate Cond,
                                              const DataLayout &DL) {
  if (!GEPLHS->hasAllConstantIndices())
    return nullptr;

  // Make sure the pointers have the same type.
  if (GEPLHS->getType() != RHS->getType())
    return nullptr;

  Value *PtrBase, *Index;
  std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);

  // The set of nodes that will take part in this transformation.
  SetVector<Value *> Nodes;

  if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
    return nullptr;

  // We know we can re-write this as
  //   (gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)
  // Since we've only looked through inbounds GEPs we know that we
  // can't have overflow on either side. We can therefore re-write
  // this as:
  //   OFFSET1 cmp OFFSET2
  Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);

  // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
  // GEP having PtrBase as the pointer base, and has returned in NewRHS the
  // offset. Since Index is the offset of LHS to the base pointer, we will now
  // compare the offsets instead of comparing the pointers.
  return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
}

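// Illustrative example (editorial sketch; hypothetical IR): if GEPLHS is
//   %lhs = getelementptr inbounds i8, i8* %base, i64 2
// and RHS is a PHI of inbounds GEPs all rooted at %base, then
//   icmp ult i8* %lhs, %rhs
// becomes a compare of the accumulated offsets, roughly
//   icmp slt i64 2, %rhs.idx
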
/// Fold comparisons between a GEP instruction and something else. At this point
/// we know that the GEP is on the LHS of the comparison.
Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                       ICmpInst::Predicate Cond,
                                       Instruction &I) {
  // Don't transform signed compares of GEPs into index compares. Even if the
  // GEP is inbounds, the final add of the base pointer can have signed overflow
  // and would change the result of the icmp.
  // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
  // the maximum signed value for the pointer type.
  if (ICmpInst::isSigned(Cond))
    return nullptr;

  // Look through bitcasts and addrspacecasts. We do not however want to remove
  // 0 GEPs.
  if (!isa<GetElementPtrInst>(RHS))
    RHS = RHS->stripPointerCasts();

  Value *PtrBase = GEPLHS->getOperand(0);
  if (PtrBase == RHS && GEPLHS->isInBounds()) {
    // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow since the gep is inbounds. See if we can
    // output an optimized form.
    Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);

    // If not, synthesize the offset the hard way.
    if (!Offset)
      Offset = EmitGEPOffset(GEPLHS);
    return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands() == GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      if (IndicesTheSame)
        return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // If we're comparing GEPs with two base pointers that only differ in type
      // and both GEPs have only constant indices or just one use, then fold
      // the compare with the adjusted indices.
      if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
          (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
          (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
          PtrBase->stripPointerCasts() ==
              GEPRHS->getOperand(0)->stripPointerCasts()) {
        Value *LOffset = EmitGEPOffset(GEPLHS);
        Value *ROffset = EmitGEPOffset(GEPRHS);

        // If we looked through an addrspacecast between different sized address
        // spaces, the LHS and RHS pointers are different sized
        // integers. Truncate to the smaller one.
        Type *LHSIndexTy = LOffset->getType();
        Type *RHSIndexTy = ROffset->getType();
        if (LHSIndexTy != RHSIndexTy) {
          if (LHSIndexTy->getPrimitiveSizeInBits() <
              RHSIndexTy->getPrimitiveSizeInBits()) {
            ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
          } else
            LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
        }

        Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
                                        LOffset, ROffset);
        return replaceInstUsesWith(I, Cmp);
      }

      // Otherwise, the base pointers are different and the indices are
      // different. Try to convert this to an indexed compare by looking
      // through PHIs/casts.
      return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
    }

    // If one of the GEPs has all zero indices, recurse.
    if (GEPLHS->hasAllZeroIndices())
      return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                         ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    if (GEPRHS->hasAllZeroIndices())
      return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0; // Keep track of # differences.
      unsigned DiffOperand = 0;    // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
              GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          }
          if (NumDifferences++) break;
          DiffOperand = i;
        }

      if (NumDifferences == 0) // SAME GEP?
        return replaceInstUsesWith(I, // No comparison is needed here.
            Builder.getInt1(ICmpInst::isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1 && GEPsInBounds) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)) ---> (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS);
      Value *R = EmitGEPOffset(GEPRHS);
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }

  // Try to convert this to an indexed compare by looking through PHIs/casts
  // as a last resort.
  return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
}

Instruction *InstCombiner::foldAllocaCmp(ICmpInst &ICI,
                                         const AllocaInst *Alloca,
                                         const Value *Other) {
  assert(ICI.isEquality() && "Cannot fold non-equality comparison.");

  // It would be tempting to fold away comparisons between allocas and any
  // pointer not based on that alloca (e.g. an argument). However, even
  // though such pointers cannot alias, they can still compare equal.
  //
  // But LLVM doesn't specify where allocas get their memory, so if the alloca
  // doesn't escape we can argue that it's impossible to guess its value, and we
  // can therefore act as if any such guesses are wrong.
  //
  // The code below checks that the alloca doesn't escape, and that it's only
  // used in a comparison once (the current instruction). The
  // single-comparison-use condition ensures that we're trivially folding all
  // comparisons against the alloca consistently, and avoids the risk of
  // erroneously folding a comparison of the pointer with itself.

  unsigned MaxIter = 32; // Break cycles and bound to constant-time.

  SmallVector<const Use *, 32> Worklist;
  for (const Use &U : Alloca->uses()) {
    if (Worklist.size() >= MaxIter)
      return nullptr;
    Worklist.push_back(&U);
  }

  unsigned NumCmps = 0;
  while (!Worklist.empty()) {
    assert(Worklist.size() <= MaxIter);
    const Use *U = Worklist.pop_back_val();
    const Value *V = U->getUser();
    --MaxIter;

    if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
        isa<SelectInst>(V)) {
      // Track the uses.
    } else if (isa<LoadInst>(V)) {
      // Loading from the pointer doesn't escape it.
      continue;
    } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
      // Storing *to* the pointer is fine, but storing the pointer escapes it.
      if (SI->getValueOperand() == U->get())
        return nullptr;
      continue;
    } else if (isa<ICmpInst>(V)) {
      if (NumCmps++)
        return nullptr; // Found more than one cmp.
      continue;
    } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
      switch (Intrin->getIntrinsicID()) {
        // These intrinsics don't escape or compare the pointer. Memset is safe
        // because we don't allow ptrtoint. Memcpy and memmove are safe because
        // we don't allow stores, so src cannot point to V.
        case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
        case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
          continue;
        default:
          return nullptr;
      }
    } else {
      return nullptr;
    }
    for (const Use &U : V->uses()) {
      if (Worklist.size() >= MaxIter)
        return nullptr;
      Worklist.push_back(&U);
    }
  }

  Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
  return replaceInstUsesWith(
      ICI,
      ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
}

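// Illustrative example (editorial sketch; hypothetical IR): given
//   %a = alloca i32
//   %c = icmp eq i32* %a, %arg
// where %a never escapes and %c is its only comparison use, the fold above
// replaces %c with false (and an "icmp ne" with true).
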
/// Fold "icmp pred (X+CI), X".
Instruction *InstCombiner::foldICmpAddOpConst(Value *X, ConstantInt *CI,
                                              ICmpInst::Predicate Pred) {
  // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
  // so the values can never be equal. Similarly for all other "or equals"
  // operators.

  // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
  // (X+2) <u X        --> X >u (MAXUINT-2)        --> X > 253
  // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
    Value *R =
        ConstantExpr::getSub(ConstantInt::getAllOnesValue(CI->getType()), CI);
    return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
  }

  // (X+1) >u X        --> X <u (0-1)        --> X != 255
  // (X+2) >u X        --> X <u (0-2)        --> X <u 254
  // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
    return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));

  unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
  ConstantInt *SMax = ConstantInt::get(X->getContext(),
                                       APInt::getSignedMaxValue(BitWidth));

  // (X+ 1) <s X       --> X >s (MAXSINT-1)          --> X == 127
  // (X+ 2) <s X       --> X >s (MAXSINT-2)          --> X >s 125
  // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)    --> X >s 0
  // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)    --> X >s -1
  // (X+ -2) <s X      --> X >s (MAXSINT- -2)        --> X >s 126
  // (X+ -1) <s X      --> X >s (MAXSINT- -1)        --> X != 127
  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
    return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI));

  // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
  // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
  // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
  // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
  // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
  // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128

  assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
  Constant *C = Builder.getInt(CI->getValue() - 1);
  return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
}

/// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
/// (icmp eq/ne A, Log2(AP2/AP1)) ->
/// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
Instruction *InstCombiner::foldICmpShrConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  bool IsAShr = isa<AShrOperator>(I.getOperand(0));
  if (IsAShr) {
    if (AP2.isAllOnesValue())
      return nullptr;
    if (AP2.isNegative() != AP1.isNegative())
      return nullptr;
    if (AP2.sgt(AP1))
      return nullptr;
  }

  if (!AP1)
    // 'A' must be large enough to shift out the highest set bit.
    return getICmp(I.ICMP_UGT, A,
                   ConstantInt::get(A->getType(), AP2.logBase2()));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  int Shift;
  if (IsAShr && AP1.isNegative())
    Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
  else
    Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();

  if (Shift > 0) {
    if (IsAShr && AP1 == AP2.ashr(Shift)) {
      // There are multiple solutions if we are comparing against -1 and the LHS
      // of the ashr is not a power of two.
      if (AP1.isAllOnesValue() && !AP2.isPowerOf2())
        return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    } else if (AP1 == AP2.lshr(Shift)) {
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    }
  }

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

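// Worked example (editorial note, not from the original source):
//   (icmp eq (lshr i8 64, %A), 16)
// has the unique solution %A == 2, since the leading-zero distance between
// 16 and 64 is 2 and 64 >> 2 == 16, so it becomes (icmp eq i8 %A, 2).
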
/// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
/// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Instruction *InstCombiner::foldICmpShlConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  unsigned AP2TrailingZeros = AP2.countTrailingZeros();

  if (!AP1 && AP2TrailingZeros != 0)
    return getICmp(
        I.ICMP_UGE, A,
        ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  // Get the distance between the lowest bits that are set.
  int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;

  if (Shift > 0 && AP2.shl(Shift) == AP1)
    return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

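// Worked example (editorial note, not from the original source):
//   (icmp eq (shl i8 4, %A), 32)
// is solved by the distance between the lowest set bits: %A == 5 - 2 == 3
// (and indeed 4 << 3 == 32), so it becomes (icmp eq i8 %A, 3).
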
/// The caller has matched a pattern of the form:
///   I = icmp ugt (add (add A, B), CI2), CI1
/// If this is of the form:
///   sum = a + b
///   if (sum+128 >u 255)
/// Then replace it with llvm.sadd.with.overflow.i8.
///
static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
                                          ConstantInt *CI2, ConstantInt *CI1,
                                          InstCombiner &IC) {
  // The transformation we're trying to do here is to transform this into an
  // llvm.sadd.with.overflow. To do this, we have to replace the original add
  // with a narrower add, and discard the add-with-constant that is part of the
  // range check (if we can't eliminate it, this isn't profitable).

  // In order to eliminate the add-with-constant, the compare can be its only
  // use.
  Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
  if (!AddWithCst->hasOneUse())
    return nullptr;

  // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
  if (!CI2->getValue().isPowerOf2())
    return nullptr;
  unsigned NewWidth = CI2->getValue().countTrailingZeros();
  if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
    return nullptr;

  // The width of the new add formed is 1 more than the bias.
  ++NewWidth;

  // Check to see that CI1 is an all-ones value with NewWidth bits.
  if (CI1->getBitWidth() == NewWidth ||
      CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
    return nullptr;

  // This is only really a signed overflow check if the inputs have been
  // sign-extended; check for that condition. For example, if CI2 is 2^31 and
  // the operands of the add are 64 bits wide, we need at least 33 sign bits.
  unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
  if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
      IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
    return nullptr;

  // In order to replace the original add with a narrower
  // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
  // and truncates that discard the high bits of the add. Verify that this is
  // the case.
  Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
  for (User *U : OrigAdd->users()) {
    if (U == AddWithCst)
      continue;

    // Only accept truncates for now. We would really like a nice recursive
    // predicate like SimplifyDemandedBits, but which goes downwards the use-def
    // chain to see which bits of a value are actually demanded. If the
    // original add had another add which was then immediately truncated, we
    // could still do the transformation.
    TruncInst *TI = dyn_cast<TruncInst>(U);
    if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
      return nullptr;
  }

  // If the pattern matches, truncate the inputs to the narrower type and
  // use the sadd_with_overflow intrinsic to efficiently compute both the
  // result and the overflow bit.
  Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
  Value *F = Intrinsic::getDeclaration(I.getModule(),
                                       Intrinsic::sadd_with_overflow, NewType);

  InstCombiner::BuilderTy &Builder = IC.Builder;

  // Put the new code above the original add, in case there are any uses of the
  // add between the add and the compare.
  Builder.SetInsertPoint(OrigAdd);

  Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
  Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
  CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
  Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
  Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());

  // The inner add was the result of the narrow add, zero extended to the
  // wider type. Replace it with the result computed by the intrinsic.
  IC.replaceInstUsesWith(*OrigAdd, ZExt);

  // The original icmp gets replaced with the overflow value.
  return ExtractValueInst::Create(Call, 1, "sadd.overflow");
}

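// Illustrative sketch (editorial note; hypothetical IR, names not from the
// original source): a typical match is an overflow-safe i8 addition done in
// i32, where %a and %b each have at least 25 sign bits:
//   %sum = add i32 %a, %b
//   %chk = add i32 %sum, 128
//   %cmp = icmp ugt i32 %chk, 255
// which is rewritten, roughly, as:
//   %s = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %a.trunc, i8 %b.trunc)
//   %cmp = extractvalue { i8, i1 } %s, 1
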
// Handle (icmp sgt smin(PosA, B), 0) -> (icmp sgt B, 0)
Instruction *InstCombiner::foldICmpWithZero(ICmpInst &Cmp) {
  CmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Cmp.getOperand(0);

  if (match(Cmp.getOperand(1), m_Zero()) && Pred == ICmpInst::ICMP_SGT) {
    Value *A, *B;
    SelectPatternResult SPR = matchSelectPattern(X, A, B);
    if (SPR.Flavor == SPF_SMIN) {
      if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT))
        return new ICmpInst(Pred, B, Cmp.getOperand(1));
      if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT))
        return new ICmpInst(Pred, A, Cmp.getOperand(1));
    }
  }
  return nullptr;
}

// Fold icmp Pred X, C.
Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
  CmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Cmp.getOperand(0);

  const APInt *C;
  if (!match(Cmp.getOperand(1), m_APInt(C)))
    return nullptr;

  Value *A = nullptr, *B = nullptr;

  // Match the following pattern, which is a common idiom when writing
  // overflow-safe integer arithmetic functions. The source performs an addition
  // in wider type and explicitly checks for overflow using comparisons against
  // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
  //
  // TODO: This could probably be generalized to handle other overflow-safe
  // operations if we worked out the formulas to compute the appropriate magic
  // constants.
  //
  // sum = a + b
  // if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
  {
    ConstantInt *CI2; // I = icmp ugt (add (add A, B), CI2), CI
    if (Pred == ICmpInst::ICMP_UGT &&
        match(X, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
      if (Instruction *Res = processUGT_ADDCST_ADD(
              Cmp, A, B, CI2, cast<ConstantInt>(Cmp.getOperand(1)), *this))
        return Res;
  }

  // FIXME: Use m_APInt to allow folds for splat constants.
  ConstantInt *CI = dyn_cast<ConstantInt>(Cmp.getOperand(1));
  if (!CI)
    return nullptr;

  // Canonicalize icmp instructions based on dominating conditions.
  BasicBlock *Parent = Cmp.getParent();
  BasicBlock *Dom = Parent->getSinglePredecessor();
  auto *BI = Dom ? dyn_cast<BranchInst>(Dom->getTerminator()) : nullptr;
  ICmpInst::Predicate Pred2;
  BasicBlock *TrueBB, *FalseBB;
  ConstantInt *CI2;
  if (BI && match(BI, m_Br(m_ICmp(Pred2, m_Specific(X), m_ConstantInt(CI2)),
                           TrueBB, FalseBB)) &&
      TrueBB != FalseBB) {
    ConstantRange CR =
        ConstantRange::makeAllowedICmpRegion(Pred, CI->getValue());
    ConstantRange DominatingCR =
        (Parent == TrueBB)
            ? ConstantRange::makeExactICmpRegion(Pred2, CI2->getValue())
            : ConstantRange::makeExactICmpRegion(
                  CmpInst::getInversePredicate(Pred2), CI2->getValue());
    ConstantRange Intersection = DominatingCR.intersectWith(CR);
    ConstantRange Difference = DominatingCR.difference(CR);
    if (Intersection.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder.getFalse());
    if (Difference.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder.getTrue());

    // If this is a normal comparison, it demands all bits. If it is a sign
    // bit comparison, it only demands the sign bit.
    bool UnusedBit;
    bool IsSignBit = isSignBitCheck(Pred, CI->getValue(), UnusedBit);

    // Canonicalizing a sign bit comparison that gets used in a branch
    // pessimizes codegen by generating a branch-on-zero instruction instead
    // of a test-and-branch. So we avoid canonicalizing in such situations
    // because the test-and-branch instruction has better branch displacement
    // than the compare-and-branch instruction.
    if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
      return nullptr;

    if (auto *AI = Intersection.getSingleElement())
      return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*AI));
    if (auto *AD = Difference.getSingleElement())
      return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*AD));
  }

  return nullptr;
}

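// Illustrative example of the dominating-condition fold (editorial sketch;
// hypothetical IR):
//   %d = icmp ult i32 %x, 10
//   br i1 %d, label %taken, label %other
// taken:                           ; here %x is known to be in [0, 10)
//   %c = icmp ult i32 %x, 20      ; Difference is empty -> folds to true
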
/// Fold icmp (trunc X), C.
Instruction *InstCombiner::foldICmpTruncConstant(ICmpInst &Cmp,
                                                 TruncInst *Trunc,
                                                 const APInt &C) {
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Trunc->getOperand(0);
  if (C.isOneValue() && C.getBitWidth() > 1) {
    // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
    Value *V = nullptr;
    if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
      return new ICmpInst(ICmpInst::ICMP_SLT, V,
                          ConstantInt::get(V->getType(), 1));
  }

  if (Cmp.isEquality() && Trunc->hasOneUse()) {
    // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
    // of the high bits truncated out of x are known.
    unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
             SrcBits = X->getType()->getScalarSizeInBits();
    KnownBits Known = computeKnownBits(X, 0, &Cmp);

    // If all the high bits are known, we can do this xform.
    if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
      // Pull in the high bits from known-ones set.
      APInt NewRHS = C.zext(SrcBits);
      NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
    }
  }

  return nullptr;
}

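// Illustrative example (editorial sketch; hypothetical IR): if the high 24
// bits of %x are known zero (e.g. %x = and i32 %w, 255), then
//   %t = trunc i32 %x to i8
//   %c = icmp eq i8 %t, 42
// becomes (icmp eq i32 %x, 42), eliminating the cast.
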
/// Fold icmp (xor X, Y), C.
Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp,
                                               BinaryOperator *Xor,
                                               const APInt &C) {
  Value *X = Xor->getOperand(0);
  Value *Y = Xor->getOperand(1);
  const APInt *XorC;
  if (!match(Y, m_APInt(XorC)))
    return nullptr;

  // If this is a comparison that tests the signbit (X < 0) or (x > -1),
  // fold the xor.
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  bool TrueIfSigned = false;
  if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) {

    // If the sign bit of the XorCst is not set, there is no change to
    // the operation, just stop using the Xor.
    if (!XorC->isNegative()) {
      Cmp.setOperand(0, X);
      Worklist.Add(Xor);
      return &Cmp;
    }

    // Emit the opposite comparison.
    if (TrueIfSigned)
      return new ICmpInst(ICmpInst::ICMP_SGT, X,
                          ConstantInt::getAllOnesValue(X->getType()));
    else
      return new ICmpInst(ICmpInst::ICMP_SLT, X,
                          ConstantInt::getNullValue(X->getType()));
  }

  if (Xor->hasOneUse()) {
    // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
    if (!Cmp.isEquality() && XorC->isSignMask()) {
      Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
                            : Cmp.getSignedPredicate();
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
    }

    // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
    if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
      Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
                            : Cmp.getSignedPredicate();
      Pred = Cmp.getSwappedPredicate(Pred);
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
    }
  }

  // (icmp ugt (xor X, C), ~C) -> (icmp ult X, C)
  //   iff -C is a power of 2
  if (Pred == ICmpInst::ICMP_UGT && *XorC == ~C && (C + 1).isPowerOf2())
    return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);

  // (icmp ult (xor X, C), -C) -> (icmp uge X, C)
  //   iff -C is a power of 2
  if (Pred == ICmpInst::ICMP_ULT && *XorC == -C && C.isPowerOf2())
    return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);

  return nullptr;
}

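// Illustrative example (editorial sketch; hypothetical IR): with an i8 sign
// mask, the sign bit test
//   %y = xor i8 %x, -128
//   %c = icmp slt i8 %y, 0
// is inverted by the xor of the sign bit and becomes (icmp sgt i8 %x, -1).
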
1514 /// Fold icmp (and (sh X, Y), C2), C1.
1515 Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
1516 const APInt &C1, const APInt &C2) {
1517 BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
1518 if (!Shift || !Shift->isShift())
1521 // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
1522 // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
1523 // code produced by the clang front-end, for bitfield access.
1524 // This seemingly simple opportunity to fold away a shift turns out to be
1525 // rather complicated. See PR17827 for details.
1526 unsigned ShiftOpcode = Shift->getOpcode();
1527 bool IsShl = ShiftOpcode == Instruction::Shl;
1529 if (match(Shift->getOperand(1), m_APInt(C3))) {
1530 bool CanFold = false;
1531 if (ShiftOpcode == Instruction::Shl) {
1532 // For a left shift, we can fold if the comparison is not signed. We can
1533 // also fold a signed comparison if the mask value and comparison value
1534 // are not negative. These constraints may not be obvious, but we can
1535 // prove that they are correct using an SMT solver.
1536 if (!Cmp.isSigned() || (!C2.isNegative() && !C1.isNegative()))
1539 bool IsAshr = ShiftOpcode == Instruction::AShr;
1540 // For a logical right shift, we can fold if the comparison is not signed.
1541 // We can also fold a signed comparison if the shifted mask value and the
1542 // shifted comparison value are not negative. These constraints may not be
1543 // obvious, but we can prove that they are correct using an SMT solver.
1544 // For an arithmetic shift right we can do the same, if we ensure
1545 // the And doesn't use any bits being shifted in. Normally these would
1546 // be turned into lshr by SimplifyDemandedBits, but not if there is an
1548 if (!IsAshr || (C2.shl(*C3).lshr(*C3) == C2)) {
1549 if (!Cmp.isSigned() ||
1550 (!C2.shl(*C3).isNegative() && !C1.shl(*C3).isNegative()))
1556 APInt NewCst = IsShl ? C1.lshr(*C3) : C1.shl(*C3);
1557 APInt SameAsC1 = IsShl ? NewCst.shl(*C3) : NewCst.lshr(*C3);
1558 // Check to see if we are shifting out any of the bits being compared.
1559 if (SameAsC1 != C1) {
1560 // If we shifted bits out, the fold is not going to work out. As a
1561 // special case, check to see if this means that the result is always
1562 // true or false now.
1563 if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
1564 return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
1565 if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
1566 return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
1568 Cmp.setOperand(1, ConstantInt::get(And->getType(), NewCst));
1569 APInt NewAndCst = IsShl ? C2.lshr(*C3) : C2.shl(*C3);
1570 And->setOperand(1, ConstantInt::get(And->getType(), NewAndCst));
1571 And->setOperand(0, Shift->getOperand(0));
1572 Worklist.Add(Shift); // Shift is dead.
1578 // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is
1579 // preferable because it allows the C2 << Y expression to be hoisted out of a
1580 // loop if Y is invariant and X is not.
1581 if (Shift->hasOneUse() && C1.isNullValue() && Cmp.isEquality() &&
1582 !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
1585 IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
1586 : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));
1588 // Compute X & (C2 << Y).
1589 Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
    Cmp.setOperand(0, NewAnd);
    return &Cmp;
  }

  return nullptr;
}
1597 /// Fold icmp (and X, C2), C1.
1598 Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
                                                 BinaryOperator *And,
                                                 const APInt &C1) {
  const APInt *C2;
  if (!match(And->getOperand(1), m_APInt(C2)))
    return nullptr;

  if (!And->hasOneUse())
    return nullptr;
1608 // If the LHS is an 'and' of a truncate and we can widen the and/compare to
1609 // the input width without changing the value produced, eliminate the cast:
1611 // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
1613 // We can do this transformation if the constants do not have their sign bits
1614 // set or if it is an equality comparison. Extending a relational comparison
1615 // when we're checking the sign bit would not work.
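  // A sketch of the widening, assuming the constraints above hold:
  //   icmp ult (and (trunc i32 %w to i8), 15), 9
  //   --> icmp ult (and i32 %w, 15), 9
  // (both constants are non-negative in i8, so the relational compare is safe).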
  Value *W;
  if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) &&
1618 (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) {
1619 // TODO: Is this a good transform for vectors? Wider types may reduce
1620 // throughput. Should this transform be limited (even for scalars) by using
1621 // shouldChangeType()?
1622 if (!Cmp.getType()->isVectorTy()) {
1623 Type *WideType = W->getType();
1624 unsigned WideScalarBits = WideType->getScalarSizeInBits();
1625 Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits));
1626 Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1627 Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
      return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
    }
  }

  if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2))
    return I;
  // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
  // (icmp pred (and A, (or (shl 1, B), 1)), 0)
  //
  // iff pred isn't signed
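  // e.g. testing "bit 0 or bit B of A is set":
  //   ((A | (A >> B)) & 1) != 0  -->  (A & ((1 << B) | 1)) != 0
  // so the mask can be computed once if A varies but B does not.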
1639 if (!Cmp.isSigned() && C1.isNullValue() && And->getOperand(0)->hasOneUse() &&
1640 match(And->getOperand(1), m_One())) {
1641 Constant *One = cast<Constant>(And->getOperand(1));
1642 Value *Or = And->getOperand(0);
1643 Value *A, *B, *LShr;
1644 if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1645 match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
      unsigned UsesRemoved = 0;
      if (And->hasOneUse())
        ++UsesRemoved;
      if (Or->hasOneUse())
        ++UsesRemoved;
      if (LShr->hasOneUse())
        ++UsesRemoved;
1654 // Compute A & ((1 << B) | 1)
1655 Value *NewOr = nullptr;
1656 if (auto *C = dyn_cast<Constant>(B)) {
1657 if (UsesRemoved >= 1)
          NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
      } else {
        if (UsesRemoved >= 3)
          NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
                                                     /*HasNUW=*/true),
                                   One, Or->getName());
      }
      if (NewOr) {
        Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
        Cmp.setOperand(0, NewAnd);
        return &Cmp;
      }
    }
  }

  return nullptr;
}
1676 /// Fold icmp (and X, Y), C.
1677 Instruction *InstCombiner::foldICmpAndConstant(ICmpInst &Cmp,
                                               BinaryOperator *And,
                                               const APInt &C) {
  if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
    return I;
1683 // TODO: These all require that Y is constant too, so refactor with the above.
1685 // Try to optimize things like "A[i] & 42 == 0" to index computations.
1686 Value *X = And->getOperand(0);
1687 Value *Y = And->getOperand(1);
1688 if (auto *LI = dyn_cast<LoadInst>(X))
1689 if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1690 if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
1691 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
1692 !LI->isVolatile() && isa<ConstantInt>(Y)) {
1693 ConstantInt *C2 = cast<ConstantInt>(Y);
          if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2))
            return Res;
        }

  if (!Cmp.isEquality())
    return nullptr;
  // X & -C == -C -> X u> ~C
  // X & -C != -C -> X u<= ~C
1703 // iff C is a power of 2
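  // e.g. (i8): X & -4 == -4  -->  X u> ~4 (i.e. X u> 251), because the mask
  // ignores the low two bits.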
1704 if (Cmp.getOperand(1) == Y && (-C).isPowerOf2()) {
1705 auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT
1706 : CmpInst::ICMP_ULE;
    return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
  }
1710 // (X & C2) == 0 -> (trunc X) >= 0
1711 // (X & C2) != 0 -> (trunc X) < 0
1712 // iff C2 is a power of 2 and it masks the sign bit of a legal integer type.
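  // For example, with a 32-bit X and C2 == 128:
  //   (X & 128) == 0  -->  (trunc X to i8) s>= 0
  // since bit 7 is the sign bit of i8 (assuming i8 is legal for the target).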
  const APInt *C2;
  if (And->hasOneUse() && C.isNullValue() && match(Y, m_APInt(C2))) {
1715 int32_t ExactLogBase2 = C2->exactLogBase2();
1716 if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) {
1717 Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
1718 if (And->getType()->isVectorTy())
1719 NTy = VectorType::get(NTy, And->getType()->getVectorNumElements());
1720 Value *Trunc = Builder.CreateTrunc(X, NTy);
1721 auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
1722 : CmpInst::ICMP_SLT;
      return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy));
    }
  }

  return nullptr;
}
1730 /// Fold icmp (or X, Y), C.
Instruction *InstCombiner::foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
                                              const APInt &C) {
  ICmpInst::Predicate Pred = Cmp.getPredicate();
1734 if (C.isOneValue()) {
    // icmp slt signum(V) 1 --> icmp slt V, 1
    Value *V = nullptr;
    if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
1738 return new ICmpInst(ICmpInst::ICMP_SLT, V,
                          ConstantInt::get(V->getType(), 1));
  }
1742 // X | C == C --> X <=u C
1743 // X | C != C --> X >u C
1744 // iff C+1 is a power of 2 (C is a bitmask of the low bits)
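  // e.g. X | 7 == 7  -->  X u<= 7 (every bit above the low three must be 0).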
1745 if (Cmp.isEquality() && Cmp.getOperand(1) == Or->getOperand(1) &&
1746 (C + 1).isPowerOf2()) {
1747 Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
    return new ICmpInst(Pred, Or->getOperand(0), Or->getOperand(1));
  }

  if (!Cmp.isEquality() || !C.isNullValue() || !Or->hasOneUse())
    return nullptr;

  Value *P, *Q;
  if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
1756 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
1757 // -> and (icmp eq P, null), (icmp eq Q, null).
    Value *CmpP =
        Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
    Value *CmpQ =
        Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
1762 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
    return BinaryOperator::Create(BOpc, CmpP, CmpQ);
  }
1766 // Are we using xors to bitwise check for a pair of (in)equalities? Convert to
1767 // a shorter form that has more potential to be folded even further.
1768 Value *X1, *X2, *X3, *X4;
1769 if (match(Or->getOperand(0), m_OneUse(m_Xor(m_Value(X1), m_Value(X2)))) &&
1770 match(Or->getOperand(1), m_OneUse(m_Xor(m_Value(X3), m_Value(X4))))) {
1771 // ((X1 ^ X2) || (X3 ^ X4)) == 0 --> (X1 == X2) && (X3 == X4)
1772 // ((X1 ^ X2) || (X3 ^ X4)) != 0 --> (X1 != X2) || (X3 != X4)
1773 Value *Cmp12 = Builder.CreateICmp(Pred, X1, X2);
1774 Value *Cmp34 = Builder.CreateICmp(Pred, X3, X4);
1775 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
    return BinaryOperator::Create(BOpc, Cmp12, Cmp34);
  }

  return nullptr;
}
1782 /// Fold icmp (mul X, Y), C.
1783 Instruction *InstCombiner::foldICmpMulConstant(ICmpInst &Cmp,
                                               BinaryOperator *Mul,
                                               const APInt &C) {
  const APInt *MulC;
  if (!match(Mul->getOperand(1), m_APInt(MulC)))
    return nullptr;
1790 // If this is a test of the sign bit and the multiply is sign-preserving with
1791 // a constant operand, use the multiply LHS operand instead.
1792 ICmpInst::Predicate Pred = Cmp.getPredicate();
1793 if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) {
1794 if (MulC->isNegative())
1795 Pred = ICmpInst::getSwappedPredicate(Pred);
1796 return new ICmpInst(Pred, Mul->getOperand(0),
                        Constant::getNullValue(Mul->getType()));
  }

  return nullptr;
}
1803 /// Fold icmp (shl 1, Y), C.
static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl,
                                   const APInt &C) {
  Value *Y;
  if (!match(Shl, m_Shl(m_One(), m_Value(Y))))
    return nullptr;
1810 Type *ShiftType = Shl->getType();
1811 unsigned TypeBits = C.getBitWidth();
1812 bool CIsPowerOf2 = C.isPowerOf2();
1813 ICmpInst::Predicate Pred = Cmp.getPredicate();
1814 if (Cmp.isUnsigned()) {
    // (1 << Y) pred C -> Y pred Log2(C)
    if (!CIsPowerOf2) {
      // (1 << Y) <  30 -> Y <= 4
      // (1 << Y) <= 30 -> Y <= 4
      // (1 << Y) >= 30 -> Y >  4
      // (1 << Y) >  30 -> Y >  4
      if (Pred == ICmpInst::ICMP_ULT)
        Pred = ICmpInst::ICMP_ULE;
      else if (Pred == ICmpInst::ICMP_UGE)
        Pred = ICmpInst::ICMP_UGT;
    }
1827 // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31
1828 // (1 << Y) < 2147483648 -> Y < 31 -> Y != 31
1829 unsigned CLog2 = C.logBase2();
1830 if (CLog2 == TypeBits - 1) {
1831 if (Pred == ICmpInst::ICMP_UGE)
1832 Pred = ICmpInst::ICMP_EQ;
1833 else if (Pred == ICmpInst::ICMP_ULT)
        Pred = ICmpInst::ICMP_NE;
    }
    return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
1837 } else if (Cmp.isSigned()) {
1838 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
1839 if (C.isAllOnesValue()) {
1840 // (1 << Y) <= -1 -> Y == 31
1841 if (Pred == ICmpInst::ICMP_SLE)
1842 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
1844 // (1 << Y) > -1 -> Y != 31
1845 if (Pred == ICmpInst::ICMP_SGT)
1846 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
1848 // (1 << Y) < 0 -> Y == 31
1849 // (1 << Y) <= 0 -> Y == 31
1850 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
1851 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
1853 // (1 << Y) >= 0 -> Y != 31
1854 // (1 << Y) > 0 -> Y != 31
1855 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
1856 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
1858 } else if (Cmp.isEquality() && CIsPowerOf2) {
    return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C.logBase2()));
  }

  return nullptr;
}
1865 /// Fold icmp (shl X, Y), C.
1866 Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp,
                                               BinaryOperator *Shl,
                                               const APInt &C) {
  const APInt *ShiftVal;
1870 if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
1871 return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal);
1873 const APInt *ShiftAmt;
1874 if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
1875 return foldICmpShlOne(Cmp, Shl, C);
1877 // Check that the shift amount is in range. If not, don't perform undefined
1878 // shifts. When the shift is visited, it will be simplified.
1879 unsigned TypeBits = C.getBitWidth();
  if (ShiftAmt->uge(TypeBits))
    return nullptr;
1883 ICmpInst::Predicate Pred = Cmp.getPredicate();
1884 Value *X = Shl->getOperand(0);
1885 Type *ShType = Shl->getType();
  // NSW guarantees that we are only shifting out sign bits from the high bits,
  // so we can ASHR the compare constant without needing a mask and eliminate
  // the shift.
1890 if (Shl->hasNoSignedWrap()) {
1891 if (Pred == ICmpInst::ICMP_SGT) {
1892 // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
1893 APInt ShiftedC = C.ashr(*ShiftAmt);
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
1896 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) {
1897 // This is the same code as the SGT case, but assert the pre-condition
1898 // that is needed for this to work with equality predicates.
1899 assert(C.ashr(*ShiftAmt).shl(*ShiftAmt) == C &&
1900 "Compare known true or false was not folded");
1901 APInt ShiftedC = C.ashr(*ShiftAmt);
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
1904 if (Pred == ICmpInst::ICMP_SLT) {
1905 // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
1906 // (X << S) <=s C is equiv to X <=s (C >> S) for all C
1907 // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
1908 // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
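      // e.g. (i8, S == 1): (X << 1) <s 10 is equiv to X <s 5, and indeed
      // ((10 - 1) >>s 1) + 1 == 5.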
1909 assert(!C.isMinSignedValue() && "Unexpected icmp slt");
1910 APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1;
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
1913 // If this is a signed comparison to 0 and the shift is sign preserving,
1914 // use the shift LHS operand instead; isSignTest may change 'Pred', so only
1915 // do that if we're sure to not continue on in this function.
1916 if (isSignTest(Pred, C))
      return new ICmpInst(Pred, X, Constant::getNullValue(ShType));
  }
  // NUW guarantees that we are only shifting out zero bits from the high bits,
  // so we can LSHR the compare constant without needing a mask and eliminate
  // the shift.
1923 if (Shl->hasNoUnsignedWrap()) {
1924 if (Pred == ICmpInst::ICMP_UGT) {
1925 // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
1926 APInt ShiftedC = C.lshr(*ShiftAmt);
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
1929 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) {
1930 // This is the same code as the UGT case, but assert the pre-condition
1931 // that is needed for this to work with equality predicates.
1932 assert(C.lshr(*ShiftAmt).shl(*ShiftAmt) == C &&
1933 "Compare known true or false was not folded");
1934 APInt ShiftedC = C.lshr(*ShiftAmt);
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
1937 if (Pred == ICmpInst::ICMP_ULT) {
1938 // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
1939 // (X << S) <=u C is equiv to X <=u (C >> S) for all C
1940 // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
1941 // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
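      // e.g. (i8, S == 2): (X << 2) <u 20 is equiv to X <u 5, and indeed
      // ((20 - 1) >>u 2) + 1 == 5.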
1942 assert(C.ugt(0) && "ult 0 should have been eliminated");
1943 APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1;
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
  }
1948 if (Cmp.isEquality() && Shl->hasOneUse()) {
1949 // Strength-reduce the shift into an 'and'.
    Constant *Mask = ConstantInt::get(
        ShType,
        APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
1953 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
1954 Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt));
    return new ICmpInst(Pred, And, LShrC);
  }
1958 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
1959 bool TrueIfSigned = false;
1960 if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) {
1961 // (X << 31) <s 0 --> (X & 1) != 0
    Constant *Mask = ConstantInt::get(
        ShType,
        APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
1965 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
1966 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
                        And, Constant::getNullValue(ShType));
  }
1970 // Transform (icmp pred iM (shl iM %v, N), C)
  // -> (icmp pred i(M-N) (trunc %v to i(M-N)), (trunc (C>>N)))
  // Transform the shl to a trunc if (trunc (C>>N)) has no loss and M-N is a
  // legal integer width.
1973 // This enables us to get rid of the shift in favor of a trunc that may be
1974 // free on the target. It has the additional benefit of comparing to a
1975 // smaller constant that may be more target-friendly.
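  // e.g. (M == 32, N == 16): icmp eq (shl i32 %v, 16), 0x120000
  //      --> icmp eq (trunc i32 %v to i16), 0x12
  // assuming i16 is a legal integer type for the target.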
1976 unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
1977 if (Shl->hasOneUse() && Amt != 0 && C.countTrailingZeros() >= Amt &&
1978 DL.isLegalInteger(TypeBits - Amt)) {
1979 Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
1980 if (ShType->isVectorTy())
1981 TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements());
    Constant *NewC =
        ConstantInt::get(TruncTy, C.ashr(*ShiftAmt).trunc(TypeBits - Amt));
    return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
  }

  return nullptr;
}
1990 /// Fold icmp ({al}shr X, Y), C.
1991 Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp,
                                               BinaryOperator *Shr,
                                               const APInt &C) {
  // An exact shr only shifts out zero bits, so:
1995 // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
1996 Value *X = Shr->getOperand(0);
1997 CmpInst::Predicate Pred = Cmp.getPredicate();
  if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() &&
      C.isNullValue())
    return new ICmpInst(Pred, X, Cmp.getOperand(1));
2002 const APInt *ShiftVal;
2003 if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal)))
2004 return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftVal);
2006 const APInt *ShiftAmt;
  if (!match(Shr->getOperand(1), m_APInt(ShiftAmt)))
    return nullptr;
2010 // Check that the shift amount is in range. If not, don't perform undefined
2011 // shifts. When the shift is visited it will be simplified.
2012 unsigned TypeBits = C.getBitWidth();
2013 unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits);
  if (ShAmtVal >= TypeBits || ShAmtVal == 0)
    return nullptr;
2017 bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2018 bool IsExact = Shr->isExact();
2019 Type *ShrTy = Shr->getType();
2020 // TODO: If we could guarantee that InstSimplify would handle all of the
2021 // constant-value-based preconditions in the folds below, then we could assert
2022 // those conditions rather than checking them. This is difficult because of
2023 // undef/poison (PR34838).
  if (IsAShr) {
    if (Pred == CmpInst::ICMP_SLT || (Pred == CmpInst::ICMP_SGT && IsExact)) {
2026 // icmp slt (ashr X, ShAmtC), C --> icmp slt X, (C << ShAmtC)
2027 // icmp sgt (ashr exact X, ShAmtC), C --> icmp sgt X, (C << ShAmtC)
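      // e.g. (ashr X, 2) s< 3 is equiv to X s< 12: 12 == 3 << 2 and
      // (12 >>s 2) == 3, so no bits of C are lost by the shift.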
2028 APInt ShiftedC = C.shl(ShAmtVal);
2029 if (ShiftedC.ashr(ShAmtVal) == C)
        return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
    }
2032 if (Pred == CmpInst::ICMP_SGT) {
2033 // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1
2034 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2035 if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() &&
2036 (ShiftedC + 1).ashr(ShAmtVal) == (C + 1))
        return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
    }
  } else {
    if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) {
2041 // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC)
2042 // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC)
2043 APInt ShiftedC = C.shl(ShAmtVal);
2044 if (ShiftedC.lshr(ShAmtVal) == C)
        return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
    }
2047 if (Pred == CmpInst::ICMP_UGT) {
2048 // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2049 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2050 if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1))
        return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
    }
  }

  if (!Cmp.isEquality())
    return nullptr;
2058 // Handle equality comparisons of shift-by-constant.
2060 // If the comparison constant changes with the shift, the comparison cannot
2061 // succeed (bits of the comparison constant cannot match the shifted value).
2062 // This should be known by InstSimplify and already be folded to true/false.
2063 assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) ||
2064 (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) &&
2065 "Expected icmp+shr simplify did not occur.");
2067 // If the bits shifted out are known zero, compare the unshifted value:
2068 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
  if (Shr->isExact())
    return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal));
2072 if (Shr->hasOneUse()) {
2073 // Canonicalize the shift into an 'and':
2074 // icmp eq/ne (shr X, ShAmt), C --> icmp eq/ne (and X, HiMask), (C << ShAmt)
2075 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
2076 Constant *Mask = ConstantInt::get(ShrTy, Val);
2077 Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
    return new ICmpInst(Pred, And, ConstantInt::get(ShrTy, C << ShAmtVal));
  }

  return nullptr;
}
2084 /// Fold icmp (udiv X, Y), C.
2085 Instruction *InstCombiner::foldICmpUDivConstant(ICmpInst &Cmp,
                                                BinaryOperator *UDiv,
                                                const APInt &C) {
  const APInt *C2;
  if (!match(UDiv->getOperand(0), m_APInt(C2)))
    return nullptr;

  assert(*C2 != 0 && "udiv 0, X should have been simplified already.");
2094 // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
2095 Value *Y = UDiv->getOperand(1);
2096 if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) {
2097 assert(!C.isMaxValue() &&
2098 "icmp ugt X, UINT_MAX should have been simplified already.");
2099 return new ICmpInst(ICmpInst::ICMP_ULE, Y,
                        ConstantInt::get(Y->getType(), C2->udiv(C + 1)));
  }
2103 // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
2104 if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) {
2105 assert(C != 0 && "icmp ult X, 0 should have been simplified already.");
2106 return new ICmpInst(ICmpInst::ICMP_UGT, Y,
                        ConstantInt::get(Y->getType(), C2->udiv(C)));
  }

  return nullptr;
}
2113 /// Fold icmp ({su}div X, Y), C.
2114 Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
                                               BinaryOperator *Div,
                                               const APInt &C) {
  // Fold: icmp pred ([us]div X, C2), C -> range test
2118 // Fold this div into the comparison, producing a range check.
2119 // Determine, based on the divide type, what the range is being
2120 // checked. If there is an overflow on the low or high side, remember
2121 // it, otherwise compute the range [low, hi) bounding the new value.
2122 // See: InsertRangeTest above for the kinds of replacements possible.
  const APInt *C2;
  if (!match(Div->getOperand(1), m_APInt(C2)))
    return nullptr;
2127 // FIXME: If the operand types don't match the type of the divide
2128 // then don't attempt this transform. The code below doesn't have the
2129 // logic to deal with a signed divide and an unsigned compare (and
2130 // vice versa). This is because (x /s C2) <s C produces different
2131 // results than (x /s C2) <u C or (x /u C2) <s C or even
2132 // (x /u C2) <u C. Simply casting the operands and result won't
  // work. :(  The if statement below tests that condition and bails
  // out if it occurs.
2135 bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2136 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2139 // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2140 // INT_MIN will also fail if the divisor is 1. Although folds of all these
2141 // division-by-constant cases should be present, we can not assert that they
2142 // have happened before we reach this icmp instruction.
2143 if (C2->isNullValue() || C2->isOneValue() ||
      (DivIsSigned && C2->isAllOnesValue()))
    return nullptr;
2147 // Compute Prod = C * C2. We are essentially solving an equation of
2148 // form X / C2 = C. We solve for X by multiplying C2 and C.
  // By solving for X, we can turn this into a range check instead of computing
  // a divide.
  APInt Prod = C * *C2;
2153 // Determine if the product overflows by seeing if the product is not equal to
2154 // the divide. Make sure we do the same kind of divide as in the LHS
2155 // instruction that we're folding.
2156 bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C;
2158 ICmpInst::Predicate Pred = Cmp.getPredicate();
2160 // If the division is known to be exact, then there is no remainder from the
2161 // divide, so the covered range size is unit, otherwise it is the divisor.
2162 APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2;
2164 // Figure out the interval that is being checked. For example, a comparison
2165 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2166 // Compute this interval based on the constants involved and the signedness of
2167 // the compare/divide. This computes a half-open interval, keeping track of
  // whether either value in the interval overflows. After analysis, each
  // overflow variable is set to 0 if its corresponding bound variable is valid,
  // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
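  // For example, "X /u 5 == 3" checks X in [15, 20); insertRangeTest would
  // typically lower that to "(X - 15) u< 5".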
2171 int LoOverflow = 0, HiOverflow = 0;
2172 APInt LoBound, HiBound;
2174 if (!DivIsSigned) { // udiv
    // e.g. X/5 op 3  --> [15, 20)
    LoBound = Prod;
    HiOverflow = LoOverflow = ProdOV;
    if (!HiOverflow) {
      // If this is not an exact divide, then many values in the range collapse
      // to the same result value.
      HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
    }
2183 } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2184 if (C.isNullValue()) { // (X / pos) op 0
2185 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
2186 LoBound = -(RangeSize - 1);
2187 HiBound = RangeSize;
2188 } else if (C.isStrictlyPositive()) { // (X / pos) op pos
2189 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
2190 HiOverflow = LoOverflow = ProdOV;
      if (!HiOverflow)
        HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2193 } else { // (X / pos) op neg
      // e.g. X/5 op -3  --> [-15-4, -15+1) --> [-19, -14)
      HiBound = Prod + 1;
      LoOverflow = HiOverflow = ProdOV ? -1 : 0;
      if (!LoOverflow) {
        APInt DivNeg = -RangeSize;
        LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
      }
    }
  } else if (C2->isNegative()) { // Divisor is < 0.
    if (Div->isExact())
      RangeSize.negate();
2205 if (C.isNullValue()) { // (X / neg) op 0
2206 // e.g. X/-5 op 0 --> [-4, 5)
2207 LoBound = RangeSize + 1;
2208 HiBound = -RangeSize;
2209 if (HiBound == *C2) { // -INTMIN = INTMIN
2210 HiOverflow = 1; // [INTMIN+1, overflow)
        HiBound = APInt(); // e.g. X/INTMIN = 0 --> X > INTMIN
      }
2213 } else if (C.isStrictlyPositive()) { // (X / neg) op pos
      // e.g. X/-5 op 3  --> [-19, -14)
      HiBound = Prod + 1;
      HiOverflow = LoOverflow = ProdOV ? -1 : 0;
      if (!LoOverflow)
        LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1 : 0;
2219 } else { // (X / neg) op neg
2220 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
      LoOverflow = HiOverflow = ProdOV;
      if (!HiOverflow)
        HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
    }
2226 // Dividing by a negative swaps the condition. LT <-> GT
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  Value *X = Div->getOperand(0);

  switch (Pred) {
  default: llvm_unreachable("Unhandled icmp opcode!");
  case ICmpInst::ICMP_EQ:
    if (LoOverflow && HiOverflow)
      return replaceInstUsesWith(Cmp, Builder.getFalse());
    if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X,
                          ConstantInt::get(Div->getType(), LoBound));
    if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X,
                          ConstantInt::get(Div->getType(), HiBound));
    return replaceInstUsesWith(
        Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true));
  case ICmpInst::ICMP_NE:
    if (LoOverflow && HiOverflow)
      return replaceInstUsesWith(Cmp, Builder.getTrue());
    if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X,
                          ConstantInt::get(Div->getType(), LoBound));
    if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X,
                          ConstantInt::get(Div->getType(), HiBound));
    return replaceInstUsesWith(Cmp,
                               insertRangeTest(X, LoBound, HiBound,
                                               DivIsSigned, false));
2260 case ICmpInst::ICMP_ULT:
2261 case ICmpInst::ICMP_SLT:
2262 if (LoOverflow == +1) // Low bound is greater than input range.
2263 return replaceInstUsesWith(Cmp, Builder.getTrue());
2264 if (LoOverflow == -1) // Low bound is less than input range.
2265 return replaceInstUsesWith(Cmp, Builder.getFalse());
2266 return new ICmpInst(Pred, X, ConstantInt::get(Div->getType(), LoBound));
2267 case ICmpInst::ICMP_UGT:
2268 case ICmpInst::ICMP_SGT:
2269 if (HiOverflow == +1) // High bound greater than input range.
2270 return replaceInstUsesWith(Cmp, Builder.getFalse());
2271 if (HiOverflow == -1) // High bound less than input range.
2272 return replaceInstUsesWith(Cmp, Builder.getTrue());
2273 if (Pred == ICmpInst::ICMP_UGT)
2274 return new ICmpInst(ICmpInst::ICMP_UGE, X,
2275 ConstantInt::get(Div->getType(), HiBound));
    return new ICmpInst(ICmpInst::ICMP_SGE, X,
                        ConstantInt::get(Div->getType(), HiBound));
  }
}
2283 /// Fold icmp (sub X, Y), C.
2284 Instruction *InstCombiner::foldICmpSubConstant(ICmpInst &Cmp,
                                               BinaryOperator *Sub,
                                               const APInt &C) {
  Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
2288 ICmpInst::Predicate Pred = Cmp.getPredicate();
  // The following transforms are only worth it if the only user of the subtract
  // is the icmp.
  if (!Sub->hasOneUse())
    return nullptr;
2295 if (Sub->hasNoSignedWrap()) {
2296 // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
2297 if (Pred == ICmpInst::ICMP_SGT && C.isAllOnesValue())
2298 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
2300 // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
2301 if (Pred == ICmpInst::ICMP_SGT && C.isNullValue())
2302 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
2304 // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
2305 if (Pred == ICmpInst::ICMP_SLT && C.isNullValue())
2306 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
2308 // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
2309 if (Pred == ICmpInst::ICMP_SLT && C.isOneValue())
      return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
  }

  const APInt *C2;
  if (!match(X, m_APInt(C2)))
    return nullptr;
2317 // C2 - Y <u C -> (Y | (C - 1)) == C2
2318 // iff (C2 & (C - 1)) == C - 1 and C is a power of 2
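  // e.g. (i8): 7 - Y u< 4  -->  (Y | 3) == 7; both sides hold exactly for
  // Y in {4, 5, 6, 7}.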
2319 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() &&
2320 (*C2 & (C - 1)) == (C - 1))
2321 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X);
2323 // C2 - Y >u C -> (Y | C) != C2
2324 // iff C2 & C == C and C + 1 is a power of 2
2325 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C)
    return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X);

  return nullptr;
}
2331 /// Fold icmp (add X, Y), C.
2332 Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp,
                                               BinaryOperator *Add,
                                               const APInt &C) {
  Value *Y = Add->getOperand(1);
  const APInt *C2;
  if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
    return nullptr;
2340 // Fold icmp pred (add X, C2), C.
2341 Value *X = Add->getOperand(0);
2342 Type *Ty = Add->getType();
2343 CmpInst::Predicate Pred = Cmp.getPredicate();
2345 // If the add does not wrap, we can always adjust the compare by subtracting
2346 // the constants. Equality comparisons are handled elsewhere. SGE/SLE are
2347 // canonicalized to SGT/SLT.
2348 if (Add->hasNoSignedWrap() &&
2349 (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) {
    bool Overflow;
    APInt NewC = C.ssub_ov(*C2, Overflow);
2352 // If there is overflow, the result must be true or false.
2353 // TODO: Can we assert there is no overflow because InstSimplify always
2354 // handles those cases?
    if (!Overflow)
      // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
      return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
  }
2360 auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2);
2361 const APInt &Upper = CR.getUpper();
2362 const APInt &Lower = CR.getLower();
2363 if (Cmp.isSigned()) {
2364 if (Lower.isSignMask())
2365 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
2366 if (Upper.isSignMask())
      return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
  } else {
    if (Lower.isMinValue())
2370 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
2371 if (Upper.isMinValue())
      return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
  }

  if (!Add->hasOneUse())
    return nullptr;
2378 // X+C <u C2 -> (X & -C2) == C
2379 // iff C & (C2-1) == 0
2380 // C2 is a power of 2
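  // e.g. (i8): (X + 8) u< 4  -->  (X & -4) == -8; both sides hold exactly for
  // X in {-8, -7, -6, -5}.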
2381 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0)
2382 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C),
2383 ConstantExpr::getNeg(cast<Constant>(Y)));
  // X+C >u C2 -> (X & ~C2) != C
  //   iff C & C2 == 0
  //       C2+1 is a power of 2
2388 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0)
2389 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C),
                        ConstantExpr::getNeg(cast<Constant>(Y)));

  return nullptr;
}
2395 bool InstCombiner::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS,
2396 Value *&RHS, ConstantInt *&Less,
2397 ConstantInt *&Equal,
2398 ConstantInt *&Greater) {
2399 // TODO: Generalize this to work with other comparison idioms or ensure
2400 // they get canonicalized into this form.
2402 // select i1 (a == b), i32 Equal, i32 (select i1 (a < b), i32 Less, i32
  // Greater), where Equal, Less and Greater are placeholders for any three
  // constants.
2405 ICmpInst::Predicate PredA, PredB;
2406 if (match(SI->getTrueValue(), m_ConstantInt(Equal)) &&
2407 match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) &&
2408 PredA == ICmpInst::ICMP_EQ &&
2409 match(SI->getFalseValue(),
2410 m_Select(m_ICmp(PredB, m_Specific(LHS), m_Specific(RHS)),
2411 m_ConstantInt(Less), m_ConstantInt(Greater))) &&
      PredB == ICmpInst::ICMP_SLT) {
    return true;
  }
  return false;
}
Instruction *InstCombiner::foldICmpSelectConstant(ICmpInst &Cmp,
                                                  SelectInst *Select,
                                                  ConstantInt *C) {
  assert(C && "Cmp RHS should be a constant int!");
2423 // If we're testing a constant value against the result of a three way
2424 // comparison, the result can be expressed directly in terms of the
2425 // original values being compared. Note: We could possibly be more
2426 // aggressive here and remove the hasOneUse test. The original select is
2427 // really likely to simplify or sink when we remove a test of the result.
2428 Value *OrigLHS, *OrigRHS;
2429 ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
2430 if (Cmp.hasOneUse() &&
      matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal,
                              C3GreaterThan)) {
    assert(C1LessThan && C2Equal && C3GreaterThan);
    bool TrueWhenLessThan =
        ConstantExpr::getCompare(Cmp.getPredicate(), C1LessThan, C)
            ->isAllOnesValue();
    bool TrueWhenEqual =
        ConstantExpr::getCompare(Cmp.getPredicate(), C2Equal, C)
            ->isAllOnesValue();
    bool TrueWhenGreaterThan =
        ConstantExpr::getCompare(Cmp.getPredicate(), C3GreaterThan, C)
            ->isAllOnesValue();
2445 // This generates the new instruction that will replace the original Cmp
2446 // Instruction. Instead of enumerating the various combinations when
2447 // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus
2448 // false, we rely on chaining of ORs and future passes of InstCombine to
2449 // simplify the OR further (i.e. a s< b || a == b becomes a s<= b).
2451 // When none of the three constants satisfy the predicate for the RHS (C),
2452 // the entire original Cmp can be simplified to a false.
2453 Value *Cond = Builder.getFalse();
2454 if (TrueWhenLessThan)
      Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT, OrigLHS, OrigRHS));
    if (TrueWhenEqual)
      Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ, OrigLHS, OrigRHS));
2458 if (TrueWhenGreaterThan)
2459 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT, OrigLHS, OrigRHS));
    return replaceInstUsesWith(Cmp, Cond);
  }

  return nullptr;
}
2466 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C
2467 /// where X is some kind of instruction.
Instruction *InstCombiner::foldICmpInstWithConstant(ICmpInst &Cmp) {
  const APInt *C;
  if (!match(Cmp.getOperand(1), m_APInt(C)))
    return nullptr;
2473 if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0))) {
2474 switch (BO->getOpcode()) {
    case Instruction::Xor:
      if (Instruction *I = foldICmpXorConstant(Cmp, BO, *C))
        return I;
      break;
    case Instruction::And:
      if (Instruction *I = foldICmpAndConstant(Cmp, BO, *C))
        return I;
      break;
    case Instruction::Or:
      if (Instruction *I = foldICmpOrConstant(Cmp, BO, *C))
        return I;
      break;
    case Instruction::Mul:
      if (Instruction *I = foldICmpMulConstant(Cmp, BO, *C))
        return I;
      break;
    case Instruction::Shl:
      if (Instruction *I = foldICmpShlConstant(Cmp, BO, *C))
        return I;
      break;
    case Instruction::LShr:
    case Instruction::AShr:
      if (Instruction *I = foldICmpShrConstant(Cmp, BO, *C))
        return I;
      break;
    case Instruction::UDiv:
      if (Instruction *I = foldICmpUDivConstant(Cmp, BO, *C))
        return I;
      LLVM_FALLTHROUGH;
    case Instruction::SDiv:
      if (Instruction *I = foldICmpDivConstant(Cmp, BO, *C))
        return I;
      break;
    case Instruction::Sub:
      if (Instruction *I = foldICmpSubConstant(Cmp, BO, *C))
        return I;
      break;
    case Instruction::Add:
      if (Instruction *I = foldICmpAddConstant(Cmp, BO, *C))
        return I;
      break;
    default:
      break;
    }
2519 // TODO: These folds could be refactored to be part of the above calls.
    if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, *C))
      return I;
  }
2524 // Match against CmpInst LHS being instructions other than binary operators.
2526 if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0))) {
2527 // For now, we only support constant integers while folding the
2528 // ICMP(SELECT)) pattern. We can extend this to support vector of integers
2529 // similar to the cases handled by binary ops above.
2530 if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
      if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS))
        return I;
  }
2535 if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0))) {
    if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C))
      return I;
  }

  if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, *C))
    return I;

  return nullptr;
}
2546 /// Fold an icmp equality instruction with binary operator LHS and constant RHS:
2547 /// icmp eq/ne BO, C.
Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
                                                             BinaryOperator *BO,
                                                             const APInt &C) {
  // TODO: Some of these folds could work with arbitrary constants, but this
  // function is limited to scalar and vector splat constants.
  if (!Cmp.isEquality())
    return nullptr;
2556 ICmpInst::Predicate Pred = Cmp.getPredicate();
2557 bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
2558 Constant *RHS = cast<Constant>(Cmp.getOperand(1));
2559 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
2561 switch (BO->getOpcode()) {
2562 case Instruction::SRem:
2563 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
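    // e.g. (X srem 8) == 0  -->  (X urem 8) == 0; for a power-of-2 divisor
    // the "== 0" (divisibility) test is sign-independent.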
    if (C.isNullValue() && BO->hasOneUse()) {
      const APInt *BOC;
      if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
2567 Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
2568 return new ICmpInst(Pred, NewRem,
                            Constant::getNullValue(BO->getType()));
      }
    }
    break;
2573 case Instruction::Add: {
2574 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
    const APInt *BOC;
    if (match(BOp1, m_APInt(BOC))) {
2577 if (BO->hasOneUse()) {
2578 Constant *SubC = ConstantExpr::getSub(RHS, cast<Constant>(BOp1));
        return new ICmpInst(Pred, BOp0, SubC);
      }
2581 } else if (C.isNullValue()) {
2582 // Replace ((add A, B) != 0) with (A != -B) if A or B is
2583 // efficiently invertible, or if the add has just this one use.
2584 if (Value *NegVal = dyn_castNegVal(BOp1))
2585 return new ICmpInst(Pred, BOp0, NegVal);
2586 if (Value *NegVal = dyn_castNegVal(BOp0))
2587 return new ICmpInst(Pred, NegVal, BOp1);
2588 if (BO->hasOneUse()) {
        Value *Neg = Builder.CreateNeg(BOp1);
        Neg->takeName(BO);
        return new ICmpInst(Pred, BOp0, Neg);
      }
    }
    break;
  }
2596 case Instruction::Xor:
2597 if (BO->hasOneUse()) {
2598 if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
2599 // For the xor case, we can xor two constants together, eliminating
2600 // the explicit xor.
2601 return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
2602 } else if (C.isNullValue()) {
2603 // Replace ((xor A, B) != 0) with (A != B)
        return new ICmpInst(Pred, BOp0, BOp1);
      }
    }
    break;
2608 case Instruction::Sub:
    if (BO->hasOneUse()) {
      const APInt *BOC;
      if (match(BOp0, m_APInt(BOC))) {
2612 // Replace ((sub BOC, B) != C) with (B != BOC-C).
2613 Constant *SubC = ConstantExpr::getSub(cast<Constant>(BOp0), RHS);
2614 return new ICmpInst(Pred, BOp1, SubC);
2615 } else if (C.isNullValue()) {
2616 // Replace ((sub A, B) != 0) with (A != B).
        return new ICmpInst(Pred, BOp0, BOp1);
      }
    }
    break;
  case Instruction::Or: {
    const APInt *BOC;
    if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
2624 // Comparing if all bits outside of a constant mask are set?
2625 // Replace (X | C) == -1 with (X & ~C) == ~C.
2626 // This removes the -1 constant.
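      // e.g. (X | 5) == -1  -->  (X & ~5) == ~5, i.e. (X & -6) == -6.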
2627 Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
2628 Value *And = Builder.CreateAnd(BOp0, NotBOC);
      return new ICmpInst(Pred, And, NotBOC);
    }
    break;
  }
  case Instruction::And: {
    const APInt *BOC;
    if (match(BOp1, m_APInt(BOC))) {
2636 // If we have ((X & C) == C), turn it into ((X & C) != 0).
2637 if (C == *BOC && C.isPowerOf2())
2638 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
2639 BO, Constant::getNullValue(RHS->getType()));
2641 // Don't perform the following transforms if the AND has multiple uses
      if (!BO->hasOneUse())
        break;
2645 // Replace (and X, (1 << size(X)-1) != 0) with x s< 0
2646 if (BOC->isSignMask()) {
2647 Constant *Zero = Constant::getNullValue(BOp0->getType());
2648 auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
2649 return new ICmpInst(NewPred, BOp0, Zero);
2652 // ((X & ~7) == 0) --> X < 8
2653 if (C.isNullValue() && (~(*BOC) + 1).isPowerOf2()) {
2654 Constant *NegBOC = ConstantExpr::getNeg(cast<Constant>(BOp1));
2655 auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
        return new ICmpInst(NewPred, BOp0, NegBOC);
      }
    }
    break;
  }
2661 case Instruction::Mul:
    if (C.isNullValue() && BO->hasNoSignedWrap()) {
      const APInt *BOC;
      if (match(BOp1, m_APInt(BOC)) && !BOC->isNullValue()) {
2665 // The trivial case (mul X, 0) is handled by InstSimplify.
2666 // General case : (mul X, C) != 0 iff X != 0
2667 // (mul X, C) == 0 iff X == 0
        return new ICmpInst(Pred, BOp0, Constant::getNullValue(RHS->getType()));
      }
    }
    break;
2672 case Instruction::UDiv:
2673 if (C.isNullValue()) {
2674 // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
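      // e.g. (udiv 5, X) == 0  -->  X u> 5: an unsigned quotient is zero
      // exactly when the divisor exceeds the dividend.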
2675 auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
      return new ICmpInst(NewPred, BOp1, BOp0);
    }
    break;
  default:
    break;
  }
  return nullptr;
}
2685 /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
Instruction *InstCombiner::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
                                                         const APInt &C) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0));
  if (!II || !Cmp.isEquality())
    return nullptr;
2692 // Handle icmp {eq|ne} <intrinsic>, Constant.
2693 Type *Ty = II->getType();
2694 switch (II->getIntrinsicID()) {
  case Intrinsic::bswap:
    Worklist.Add(II);
    Cmp.setOperand(0, II->getArgOperand(0));
    Cmp.setOperand(1, ConstantInt::get(Ty, C.byteSwap()));
    return &Cmp;
2701 case Intrinsic::ctlz:
2702 case Intrinsic::cttz:
2703 // ctz(A) == bitwidth(A) -> A == 0 and likewise for !=
    if (C == C.getBitWidth()) {
      Worklist.Add(II);
      Cmp.setOperand(0, II->getArgOperand(0));
      Cmp.setOperand(1, ConstantInt::getNullValue(Ty));
      return &Cmp;
    }
    break;
2712 case Intrinsic::ctpop: {
2713 // popcount(A) == 0 -> A == 0 and likewise for !=
2714 // popcount(A) == bitwidth(A) -> A == -1 and likewise for !=
2715 bool IsZero = C.isNullValue();
    if (IsZero || C == C.getBitWidth()) {
      Worklist.Add(II);
      Cmp.setOperand(0, II->getArgOperand(0));
      auto *NewOp =
          IsZero ? Constant::getNullValue(Ty) : Constant::getAllOnesValue(Ty);
      Cmp.setOperand(1, NewOp);
      return &Cmp;
    }
    break;
  }
  default:
    break;
  }

  return nullptr;
}
2733 /// Handle icmp with constant (but not simple integer constant) RHS.
2734 Instruction *InstCombiner::foldICmpInstWithConstantNotInt(ICmpInst &I) {
2735 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Constant *RHSC = dyn_cast<Constant>(Op1);
  Instruction *LHSI = dyn_cast<Instruction>(Op0);
  if (!RHSC || !LHSI)
    return nullptr;
2741 switch (LHSI->getOpcode()) {
2742 case Instruction::GetElementPtr:
2743 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
2744 if (RHSC->isNullValue() &&
2745 cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
2746 return new ICmpInst(
2747 I.getPredicate(), LHSI->getOperand(0),
          Constant::getNullValue(LHSI->getOperand(0)->getType()));
    break;
2750 case Instruction::PHI:
2751 // Only fold icmp into the PHI if the phi and icmp are in the same
2752 // block. If in the same block, we're encouraging jump threading. If
2753 // not, we are just pessimizing the code by making an i1 phi.
2754 if (LHSI->getParent() == I.getParent())
      if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
        return NV;
    break;
2758 case Instruction::Select: {
2759 // If either operand of the select is a constant, we can fold the
2760 // comparison into the select arms, which will cause one to be
2761 // constant folded and the select turned into a bitwise or.
2762 Value *Op1 = nullptr, *Op2 = nullptr;
2763 ConstantInt *CI = nullptr;
2764 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
2765 Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
2766 CI = dyn_cast<ConstantInt>(Op1);
2768 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
2769 Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
2770 CI = dyn_cast<ConstantInt>(Op2);
2773 // We only want to perform this transformation if it will not lead to
2774 // additional code. This is true if either both sides of the select
2775 // fold to a constant (in which case the icmp is replaced with a select
2776 // which will usually simplify) or this is the only user of the
2777 // select (in which case we are trading a select+icmp for a simpler
2778 // select+icmp) or all uses of the select can be replaced based on
2779 // dominance information ("Global cases").
    bool Transform = false;
    if (Op1 && Op2)
      Transform = true;
    else if (Op1 || Op2) {
      // Local case
      if (LHSI->hasOneUse())
        Transform = true;
      // Global cases
      else if (CI && !CI->isZero())
        // When Op1 is constant try replacing select with second operand.
        // Otherwise Op2 is constant and try replacing select with first
        // operand.
        Transform =
            replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1);
    }
    if (Transform) {
      if (!Op1)
        Op1 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC,
                                 I.getName());
      if (!Op2)
        Op2 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC,
                                 I.getName());
      return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
    }
    break;
  }
2806 case Instruction::IntToPtr:
2807 // icmp pred inttoptr(X), null -> icmp pred X, 0
2808 if (RHSC->isNullValue() &&
2809 DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
2810 return new ICmpInst(
2811 I.getPredicate(), LHSI->getOperand(0),
          Constant::getNullValue(LHSI->getOperand(0)->getType()));
    break;
2815 case Instruction::Load:
2816 // Try to optimize things like "A[i] > 4" to index computations.
2817 if (GetElementPtrInst *GEP =
2818 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
2819 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
2820 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
2821 !cast<LoadInst>(LHSI)->isVolatile())
          if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
            return Res;
    }
    break;
  }

  return nullptr;
}
2831 /// Try to fold icmp (binop), X or icmp X, (binop).
2832 /// TODO: A large part of this logic is duplicated in InstSimplify's
/// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
/// duplication.
2835 Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) {
2836 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2838 // Special logic for binary operators.
2839 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
  BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
  if (!BO0 && !BO1)
    return nullptr;
2844 const CmpInst::Predicate Pred = I.getPredicate();
2845 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
  if (BO0 && isa<OverflowingBinaryOperator>(BO0))
    NoOp0WrapProblem =
        ICmpInst::isEquality(Pred) ||
2849 (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
2850 (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
  if (BO1 && isa<OverflowingBinaryOperator>(BO1))
    NoOp1WrapProblem =
        ICmpInst::isEquality(Pred) ||
2854 (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
2855 (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
2857 // Analyze the case when either Op0 or Op1 is an add instruction.
2858 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
2859 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2860 if (BO0 && BO0->getOpcode() == Instruction::Add) {
2861 A = BO0->getOperand(0);
    B = BO0->getOperand(1);
  }
2864 if (BO1 && BO1->getOpcode() == Instruction::Add) {
2865 C = BO1->getOperand(0);
    D = BO1->getOperand(1);
  }
2869 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
2870 if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
2871 return new ICmpInst(Pred, A == Op1 ? B : A,
2872 Constant::getNullValue(Op1->getType()));
2874 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
2875 if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
2876 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
2879 // icmp (X+Y), (X+Z) -> icmp Y, Z for equalities or if there is no overflow.
2880 if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
2882 // Try not to increase register pressure.
2883 BO0->hasOneUse() && BO1->hasOneUse()) {
    // Determine Y and Z in the form icmp (X+Y), (X+Z).
    Value *Y, *Z;
    if (A == C) {
      // C + B == C + D  ->  B == D
      Y = B;
      Z = D;
    } else if (A == D) {
      // D + B == C + D  ->  B == C
      Y = B;
      Z = C;
    } else if (B == C) {
      // A + C == C + D  ->  A == D
      Y = A;
      Z = D;
    } else {
      assert(B == D);
      // A + D == C + D  ->  A == C
      Y = A;
      Z = C;
    }
    return new ICmpInst(Pred, Y, Z);
  }
2907 // icmp slt (X + -1), Y -> icmp sle X, Y
2908 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT &&
2909 match(B, m_AllOnes()))
2910 return new ICmpInst(CmpInst::ICMP_SLE, A, Op1);
2912 // icmp sge (X + -1), Y -> icmp sgt X, Y
2913 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE &&
2914 match(B, m_AllOnes()))
2915 return new ICmpInst(CmpInst::ICMP_SGT, A, Op1);
2917 // icmp sle (X + 1), Y -> icmp slt X, Y
2918 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One()))
2919 return new ICmpInst(CmpInst::ICMP_SLT, A, Op1);
2921 // icmp sgt (X + 1), Y -> icmp sge X, Y
2922 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One()))
2923 return new ICmpInst(CmpInst::ICMP_SGE, A, Op1);
2925 // icmp sgt X, (Y + -1) -> icmp sge X, Y
2926 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT &&
2927 match(D, m_AllOnes()))
2928 return new ICmpInst(CmpInst::ICMP_SGE, Op0, C);
2930 // icmp sle X, (Y + -1) -> icmp slt X, Y
2931 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE &&
2932 match(D, m_AllOnes()))
2933 return new ICmpInst(CmpInst::ICMP_SLT, Op0, C);
2935 // icmp sge X, (Y + 1) -> icmp sgt X, Y
2936 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One()))
2937 return new ICmpInst(CmpInst::ICMP_SGT, Op0, C);
2939 // icmp slt X, (Y + 1) -> icmp sle X, Y
2940 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One()))
2941 return new ICmpInst(CmpInst::ICMP_SLE, Op0, C);
2943 // TODO: The subtraction-related identities shown below also hold, but
2944 // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
2945 // wouldn't happen even if they were implemented.
2947 // icmp ult (X - 1), Y -> icmp ule X, Y
2948 // icmp uge (X - 1), Y -> icmp ugt X, Y
2949 // icmp ugt X, (Y - 1) -> icmp uge X, Y
2950 // icmp ule X, (Y - 1) -> icmp ult X, Y
2952 // icmp ule (X + 1), Y -> icmp ult X, Y
2953 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One()))
2954 return new ICmpInst(CmpInst::ICMP_ULT, A, Op1);
2956 // icmp ugt (X + 1), Y -> icmp uge X, Y
2957 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One()))
2958 return new ICmpInst(CmpInst::ICMP_UGE, A, Op1);
2960 // icmp uge X, (Y + 1) -> icmp ugt X, Y
2961 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One()))
2962 return new ICmpInst(CmpInst::ICMP_UGT, Op0, C);
2964 // icmp ult X, (Y + 1) -> icmp ule X, Y
2965 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One()))
2966 return new ICmpInst(CmpInst::ICMP_ULE, Op0, C);
2968 // if C1 has greater magnitude than C2:
2969 // icmp (X + C1), (Y + C2) -> icmp (X + C3), Y
2970 // s.t. C3 = C1 - C2
2972 // if C2 has greater magnitude than C1:
2973 // icmp (X + C1), (Y + C2) -> icmp X, (Y + C3)
2974 // s.t. C3 = C2 - C1
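  // e.g. icmp slt (X + 5), (Y + 2)  -->  icmp slt (X + 3), Y
  // (C1 == 5 has the greater magnitude, so C3 == 5 - 2 == 3).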
2975 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
2976 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned())
2977 if (ConstantInt *C1 = dyn_cast<ConstantInt>(B))
2978 if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) {
2979 const APInt &AP1 = C1->getValue();
2980 const APInt &AP2 = C2->getValue();
2981 if (AP1.isNegative() == AP2.isNegative()) {
2982 APInt AP1Abs = C1->getValue().abs();
2983 APInt AP2Abs = C2->getValue().abs();
2984 if (AP1Abs.uge(AP2Abs)) {
2985 ConstantInt *C3 = Builder.getInt(AP1 - AP2);
2986 Value *NewAdd = Builder.CreateNSWAdd(A, C3);
            return new ICmpInst(Pred, NewAdd, C);
          } else {
            ConstantInt *C3 = Builder.getInt(AP2 - AP1);
2990 Value *NewAdd = Builder.CreateNSWAdd(C, C3);
            return new ICmpInst(Pred, A, NewAdd);
          }
        }
      }
2996 // Analyze the case when either Op0 or Op1 is a sub instruction.
  // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
  A = nullptr;
  B = nullptr;
  C = nullptr;
  D = nullptr;
3002 if (BO0 && BO0->getOpcode() == Instruction::Sub) {
3003 A = BO0->getOperand(0);
    B = BO0->getOperand(1);
  }
3006 if (BO1 && BO1->getOpcode() == Instruction::Sub) {
3007 C = BO1->getOperand(0);
    D = BO1->getOperand(1);
  }
3011 // icmp (X-Y), X -> icmp 0, Y for equalities or if there is no overflow.
3012 if (A == Op1 && NoOp0WrapProblem)
3013 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
3015 // icmp X, (X-Y) -> icmp Y, 0 for equalities or if there is no overflow.
3016 if (C == Op0 && NoOp1WrapProblem)
3017 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
3019 // icmp (Y-X), (Z-X) -> icmp Y, Z for equalities or if there is no overflow.
3020 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem &&
3021 // Try not to increase register pressure.
3022 BO0->hasOneUse() && BO1->hasOneUse())
3023 return new ICmpInst(Pred, A, C);
3025 // icmp (X-Y), (X-Z) -> icmp Z, Y for equalities or if there is no overflow.
3026 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem &&
3027 // Try not to increase register pressure.
3028 BO0->hasOneUse() && BO1->hasOneUse())
3029 return new ICmpInst(Pred, D, B);
3031 // icmp (0-X) < cst --> x > -cst
  if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
    Value *X;
    if (match(BO0, m_Neg(m_Value(X))))
3035 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1))
3036 if (!RHSC->isMinValue(/*isSigned=*/true))
3037 return new ICmpInst(I.getSwappedPredicate(), X,
                              ConstantExpr::getNeg(RHSC));
  }
3041 BinaryOperator *SRem = nullptr;
3042 // icmp (srem X, Y), Y
  if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
    SRem = BO0;
  // icmp Y, (srem X, Y)
  else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
           Op0 == BO1->getOperand(1))
    SRem = BO1;
  if (SRem) {
    // We don't check hasOneUse to avoid increasing register pressure because
    // the value we use is the same value this instruction was already using.
    switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
    default:
      break;
3055 case ICmpInst::ICMP_EQ:
3056 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
3057 case ICmpInst::ICMP_NE:
3058 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
3059 case ICmpInst::ICMP_SGT:
3060 case ICmpInst::ICMP_SGE:
3061 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
3062 Constant::getAllOnesValue(SRem->getType()));
3063 case ICmpInst::ICMP_SLT:
3064 case ICmpInst::ICMP_SLE:
3065 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
                          Constant::getNullValue(SRem->getType()));
    }
  }
3070 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() &&
3071 BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) {
    switch (BO0->getOpcode()) {
    default:
      break;
3075 case Instruction::Add:
3076 case Instruction::Sub:
3077 case Instruction::Xor: {
3078 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
        return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));

      const APInt *C;
      if (match(BO0->getOperand(1), m_APInt(C))) {
3083 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
3084 if (C->isSignMask()) {
3085 ICmpInst::Predicate NewPred =
3086 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate();
3087 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
3090 // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
3091 if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) {
3092 ICmpInst::Predicate NewPred =
3093 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate();
3094 NewPred = I.getSwappedPredicate(NewPred);
          return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
        }
      }
      break;
    }
3100 case Instruction::Mul: {
      if (!I.isEquality())
        break;

      const APInt *C;
      if (match(BO0->getOperand(1), m_APInt(C)) && !C->isNullValue() &&
          !C->isOneValue()) {
3107 // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask)
3108 // Mask = -1 >> count-trailing-zeros(C).
3109 if (unsigned TZs = C->countTrailingZeros()) {
          Constant *Mask = ConstantInt::get(
              BO0->getType(),
              APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
3113 Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
3114 Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
3115 return new ICmpInst(Pred, And1, And2);
3117 // If there are no trailing zeros in the multiplier, just eliminate
3118 // the multiplies (no masking is needed):
3119 // icmp eq/ne (X * C), (Y * C) --> icmp eq/ne X, Y
        return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
      }
      break;
    }
3124 case Instruction::UDiv:
3125 case Instruction::LShr:
      if (I.isSigned() || !BO0->isExact() || !BO1->isExact())
        break;
3128 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3130 case Instruction::SDiv:
      if (!I.isEquality() || !BO0->isExact() || !BO1->isExact())
        break;
3133 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3135 case Instruction::AShr:
      if (!BO0->isExact() || !BO1->isExact())
        break;
3138 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3140 case Instruction::Shl: {
3141 bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap();
      bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap();
      if (!NUW && !NSW)
        break;
      if (!NSW && I.isSigned())
        break;
      return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
    }
    }
  }
3153 // Transform A & (L - 1) `ult` L --> L != 0
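  // e.g. with L == 8: (A & 7) u< 8 is always true when 8 != 0; and when
  // L == 0, A & -1 u< 0 is always false, matching "L != 0".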
3154 auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
3155 auto BitwiseAnd = m_c_And(m_Value(), LSubOne);
3157 if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) {
3158 auto *Zero = Constant::getNullValue(BO0->getType());
    return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
  }

  return nullptr;
}
3166 /// Fold icmp Pred min|max(X, Y), X.
3167 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) {
3168 ICmpInst::Predicate Pred = Cmp.getPredicate();
3169 Value *Op0 = Cmp.getOperand(0);
3170 Value *X = Cmp.getOperand(1);
3172 // Canonicalize minimum or maximum operand to LHS of the icmp.
3173 if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) ||
3174 match(X, m_c_SMax(m_Specific(Op0), m_Value())) ||
3175 match(X, m_c_UMin(m_Specific(Op0), m_Value())) ||
      match(X, m_c_UMax(m_Specific(Op0), m_Value()))) {
    std::swap(Op0, X);
    Pred = Cmp.getSwappedPredicate();
  }

  Value *Y;
3182 if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) {
3183 // smin(X, Y) == X --> X s<= Y
3184 // smin(X, Y) s>= X --> X s<= Y
3185 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE)
3186 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
3188 // smin(X, Y) != X --> X s> Y
3189 // smin(X, Y) s< X --> X s> Y
3190 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT)
3191 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
3193 // These cases should be handled in InstSimplify:
3194 // smin(X, Y) s<= X --> true
3195 // smin(X, Y) s> X --> false
3199 if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) {
3200 // smax(X, Y) == X --> X s>= Y
3201 // smax(X, Y) s<= X --> X s>= Y
3202 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE)
3203 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
3205 // smax(X, Y) != X --> X s< Y
3206 // smax(X, Y) s> X --> X s< Y
3207 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT)
3208 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
3210 // These cases should be handled in InstSimplify:
3211 // smax(X, Y) s>= X --> true
3212 // smax(X, Y) s< X --> false
3216 if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) {
3217 // umin(X, Y) == X --> X u<= Y
3218 // umin(X, Y) u>= X --> X u<= Y
3219 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE)
3220 return new ICmpInst(ICmpInst::ICMP_ULE, X, Y);
3222 // umin(X, Y) != X --> X u> Y
3223 // umin(X, Y) u< X --> X u> Y
3224 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT)
3225 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
3227 // These cases should be handled in InstSimplify:
3228 // umin(X, Y) u<= X --> true
3229 // umin(X, Y) u> X --> false
3233 if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) {
3234 // umax(X, Y) == X --> X u>= Y
3235 // umax(X, Y) u<= X --> X u>= Y
3236 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE)
3237 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);
3239 // umax(X, Y) != X --> X u< Y
3240 // umax(X, Y) u> X --> X u< Y
3241 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT)
3242 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
3244 // These cases should be handled in InstSimplify:
3245 // umax(X, Y) u>= X --> true
3246 // umax(X, Y) u< X --> false
3253 Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
3254 if (!I.isEquality())
3257 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3258 const CmpInst::Predicate Pred = I.getPredicate();
3259 Value *A, *B, *C, *D;
3260 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
3261 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
3262 Value *OtherVal = A == Op1 ? B : A;
3263 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
3266 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
3267 // A^c1 == C^c2 --> A == C^(c1^c2)
3268 ConstantInt *C1, *C2;
3269 if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
3270 Op1->hasOneUse()) {
3271 Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
3272 Value *Xor = Builder.CreateXor(C, NC);
3273 return new ICmpInst(Pred, A, Xor);
3276 // A^B == A^D -> B == D
3277 if (A == C)
3278 return new ICmpInst(Pred, B, D);
3279 if (A == D)
3280 return new ICmpInst(Pred, B, C);
3281 if (B == C)
3282 return new ICmpInst(Pred, A, D);
3283 if (B == D)
3284 return new ICmpInst(Pred, A, C);
3288 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
3289 // A == (A^B) -> B == 0
3290 Value *OtherVal = A == Op0 ? B : A;
3291 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
3294 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
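// (X and Y agree on all the bits selected by Z exactly when their XOR has
// no bit set inside Z.)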
3295 if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
3296 match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
3297 Value *X = nullptr, *Y = nullptr, *Z = nullptr;
3303 } else if (A == D) {
3307 } else if (B == C) {
3311 } else if (B == D) {
3317 if (X) { // Build (X^Y) & Z
3318 Op1 = Builder.CreateXor(X, Y);
3319 Op1 = Builder.CreateAnd(Op1, Z);
3320 I.setOperand(0, Op1);
3321 I.setOperand(1, Constant::getNullValue(Op1->getType()));
3326 // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B)
3327 // and (B & (1<<X)-1) == (zext A) --> A == (trunc B)
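// For example, with A of type i8 compared in i32 (mask 255 == (1 << 8) - 1):
//   icmp eq (zext i8 A to i32), (and i32 B, 255) --> icmp eq A, (trunc B)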
3328 ConstantInt *Cst1;
3329 if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) &&
3330 match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) ||
3331 (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) &&
3332 match(Op1, m_ZExt(m_Value(A))))) {
3333 APInt Pow2 = Cst1->getValue() + 1;
3334 if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
3335 Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
3336 return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
3339 // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
3340 // For lshr and ashr pairs.
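// For example (illustrative, i8 values), with C == 3:
//   icmp eq (lshr A, 3), (lshr B, 3) --> icmp ult (xor A, B), 8
// i.e. A and B may differ only in their low three bits.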
3341 if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) &&
3342 match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) ||
3343 (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) &&
3344 match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) {
3345 unsigned TypeBits = Cst1->getBitWidth();
3346 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
3347 if (ShAmt < TypeBits && ShAmt != 0) {
3348 ICmpInst::Predicate NewPred =
3349 Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
3350 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
3351 APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
3352 return new ICmpInst(NewPred, Xor, Builder.getInt(CmpVal));
3356 // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
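// For example (illustrative, i8 values), with C == 4 only the low four bits
// survive the shift:
//   icmp eq (shl A, 4), (shl B, 4) --> icmp eq (and (xor A, B), 15), 0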
3357 if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
3358 match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
3359 unsigned TypeBits = Cst1->getBitWidth();
3360 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
3361 if (ShAmt < TypeBits && ShAmt != 0) {
3362 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
3363 APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
3364 Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal),
3365 I.getName() + ".mask");
3366 return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
3370 // Transform "icmp eq (trunc (lshr X, cst1)), cst" to
3371 // "icmp eq (and X, mask), (cst << cst1)"
3372 uint64_t ShAmt = 0;
3373 if (Op0->hasOneUse() &&
3374 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
3375 match(Op1, m_ConstantInt(Cst1)) &&
3376 // Only do this when A has multiple uses. This is most important to do
3377 // when it exposes other optimizations.
3378 !A->hasOneUse()) {
3379 unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
3381 if (ShAmt < ASize) {
3382 APInt MaskV =
3383 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
3384 MaskV <<= ShAmt;
3386 APInt CmpV = Cst1->getValue().zext(ASize);
3387 CmpV <<= ShAmt;
3389 Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
3390 return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
3394 // If both operands are byte-swapped or bit-reversed, just compare the
3395 // original values.
3396 // TODO: Move this to a function similar to foldICmpIntrinsicWithConstant()
3397 // and handle more intrinsics.
3398 if ((match(Op0, m_BSwap(m_Value(A))) && match(Op1, m_BSwap(m_Value(B)))) ||
3399 (match(Op0, m_BitReverse(m_Value(A))) &&
3400 match(Op1, m_BitReverse(m_Value(B)))))
3401 return new ICmpInst(Pred, A, B);
3406 /// Handle icmp (cast x to y), (cast/cst). We only handle extending casts so
3407 /// far.
3408 Instruction *InstCombiner::foldICmpWithCastAndCast(ICmpInst &ICmp) {
3409 const CastInst *LHSCI = cast<CastInst>(ICmp.getOperand(0));
3410 Value *LHSCIOp = LHSCI->getOperand(0);
3411 Type *SrcTy = LHSCIOp->getType();
3412 Type *DestTy = LHSCI->getType();
3415 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
3416 // integer type is the same size as the pointer type.
3417 if (LHSCI->getOpcode() == Instruction::PtrToInt &&
3418 DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
3419 Value *RHSOp = nullptr;
3420 if (auto *RHSC = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
3421 Value *RHSCIOp = RHSC->getOperand(0);
3422 if (RHSCIOp->getType()->getPointerAddressSpace() ==
3423 LHSCIOp->getType()->getPointerAddressSpace()) {
3424 RHSOp = RHSC->getOperand(0);
3425 // If the pointer types don't match, insert a bitcast.
3426 if (LHSCIOp->getType() != RHSOp->getType())
3427 RHSOp = Builder.CreateBitCast(RHSOp, LHSCIOp->getType());
3429 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
3430 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
3434 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSOp);
3437 // The code below only handles extension cast instructions, so far.
3439 if (LHSCI->getOpcode() != Instruction::ZExt &&
3440 LHSCI->getOpcode() != Instruction::SExt)
3443 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
3444 bool isSignedCmp = ICmp.isSigned();
3446 if (auto *CI = dyn_cast<CastInst>(ICmp.getOperand(1))) {
3447 // Not an extension from the same type?
3448 Value *RHSCIOp = CI->getOperand(0);
3449 if (RHSCIOp->getType() != LHSCIOp->getType())
3452 // If the signedness of the two casts doesn't agree (i.e. one is a sext
3453 // and the other is a zext), then we can't handle this.
3454 if (CI->getOpcode() != LHSCI->getOpcode())
3457 // Deal with equality cases early.
3458 if (ICmp.isEquality())
3459 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp);
3461 // A signed comparison of sign extended values simplifies into a
3462 // signed comparison.
3463 if (isSignedCmp && isSignedExt)
3464 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp);
3466 // The other three cases all fold into an unsigned comparison.
3467 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
3470 // If we aren't dealing with a constant on the RHS, exit early.
3471 auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
3475 // Compute the constant that would happen if we truncated to SrcTy then
3476 // re-extended to DestTy.
3477 Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy);
3478 Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(), Res1, DestTy);
3480 // If the re-extended constant didn't change...
3481 if (Res2 == C) {
3482 // Deal with equality cases early.
3483 if (ICmp.isEquality())
3484 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1);
3486 // A signed comparison of sign extended values simplifies into a
3487 // signed comparison.
3488 if (isSignedExt && isSignedCmp)
3489 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1);
3491 // The other three cases all fold into an unsigned comparison.
3492 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, Res1);
3495 // The re-extended constant changed, partly changed (in the case of a vector),
3496 // or could not be determined to be equal (in the case of a constant
3497 // expression), so the constant cannot be represented in the shorter type.
3498 // Consequently, we cannot emit a simple comparison.
3499 // All the cases that fold to true or false will have already been handled
3500 // by SimplifyICmpInst, so only deal with the tricky case.
3502 if (isSignedCmp || !isSignedExt || !isa<ConstantInt>(C))
3505 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases
3506 // should have been folded away previously and not enter in here.
3508 // We're performing an unsigned comp with a sign extended value.
3509 // This is true if the input is >= 0. [aka >s -1]
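// For example (illustrative, X of type i8; 1000 is not representable in i8):
//   icmp ult (sext i8 X to i32), 1000 --> icmp sgt i8 X, -1
// because the sign-extended values u< 1000 are exactly the non-negative ones.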
3510 Constant *NegOne = Constant::getAllOnesValue(SrcTy);
3511 Value *Result = Builder.CreateICmpSGT(LHSCIOp, NegOne, ICmp.getName());
3513 // Finally, return the value computed.
3514 if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
3515 return replaceInstUsesWith(ICmp, Result);
3517 assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
3518 return BinaryOperator::CreateNot(Result);
3521 bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
3522 Value *RHS, Instruction &OrigI,
3523 Value *&Result, Constant *&Overflow) {
3524 if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
3525 std::swap(LHS, RHS);
3527 auto SetResult = [&](Value *OpResult, Constant *OverflowVal, bool ReuseName) {
3528 Result = OpResult;
3529 Overflow = OverflowVal;
3530 if (ReuseName)
3531 Result->takeName(&OrigI);
3532 return true;
3533 };
3535 // If the overflow check was an add followed by a compare, the insertion point
3536 // may be pointing to the compare. We want to insert the new instructions
3537 // before the add in case there are uses of the add between the add and the
3538 // compare.
3539 Builder.SetInsertPoint(&OrigI);
3541 switch (OCF) {
3542 default:
3543 llvm_unreachable("bad overflow check kind!");
3545 case OCF_UNSIGNED_ADD: {
3546 OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigI);
3547 if (OR == OverflowResult::NeverOverflows)
3548 return SetResult(Builder.CreateNUWAdd(LHS, RHS), Builder.getFalse(),
3551 if (OR == OverflowResult::AlwaysOverflows)
3552 return SetResult(Builder.CreateAdd(LHS, RHS), Builder.getTrue(), true);
3554 // Fall through uadd into sadd
3557 case OCF_SIGNED_ADD: {
3558 // X + 0 -> {X, false}
3559 if (match(RHS, m_Zero()))
3560 return SetResult(LHS, Builder.getFalse(), false);
3562 // We can strength reduce this signed add into a regular add if we can prove
3563 // that it will never overflow.
3564 if (OCF == OCF_SIGNED_ADD)
3565 if (willNotOverflowSignedAdd(LHS, RHS, OrigI))
3566 return SetResult(Builder.CreateNSWAdd(LHS, RHS), Builder.getFalse(),
3571 case OCF_UNSIGNED_SUB:
3572 case OCF_SIGNED_SUB: {
3573 // X - 0 -> {X, false}
3574 if (match(RHS, m_Zero()))
3575 return SetResult(LHS, Builder.getFalse(), false);
3577 if (OCF == OCF_SIGNED_SUB) {
3578 if (willNotOverflowSignedSub(LHS, RHS, OrigI))
3579 return SetResult(Builder.CreateNSWSub(LHS, RHS), Builder.getFalse(),
3582 if (willNotOverflowUnsignedSub(LHS, RHS, OrigI))
3583 return SetResult(Builder.CreateNUWSub(LHS, RHS), Builder.getFalse(),
3589 case OCF_UNSIGNED_MUL: {
3590 OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, &OrigI);
3591 if (OR == OverflowResult::NeverOverflows)
3592 return SetResult(Builder.CreateNUWMul(LHS, RHS), Builder.getFalse(),
3594 if (OR == OverflowResult::AlwaysOverflows)
3595 return SetResult(Builder.CreateMul(LHS, RHS), Builder.getTrue(), true);
3598 case OCF_SIGNED_MUL:
3599 // X * undef -> undef
3600 if (isa<UndefValue>(RHS))
3601 return SetResult(RHS, UndefValue::get(Builder.getInt1Ty()), false);
3603 // X * 0 -> {0, false}
3604 if (match(RHS, m_Zero()))
3605 return SetResult(RHS, Builder.getFalse(), false);
3607 // X * 1 -> {X, false}
3608 if (match(RHS, m_One()))
3609 return SetResult(LHS, Builder.getFalse(), false);
3611 if (OCF == OCF_SIGNED_MUL)
3612 if (willNotOverflowSignedMul(LHS, RHS, OrigI))
3613 return SetResult(Builder.CreateNSWMul(LHS, RHS), Builder.getFalse(),
3621 /// \brief Recognize and process idiom involving test for multiplication
3622 /// overflow.
3624 /// The caller has matched a pattern of the form:
3625 /// I = cmp u (mul(zext A, zext B)), V
3626 /// The function checks if this is a test for overflow and if so replaces
3627 /// multiplication with call to 'mul.with.overflow' intrinsic.
3629 /// \param I Compare instruction.
3630 /// \param MulVal Result of 'mul' instruction. It is one of the arguments of
3631 /// the compare instruction. Must be of integer type.
3632 /// \param OtherVal The other argument of compare instruction.
3633 /// \returns Instruction which must replace the compare instruction, NULL if no
3634 /// replacement required.
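///
/// For example (an illustrative sketch):
///   %mulval = mul i64 %za, %zb   ; %za = zext i32 %a, %zb = zext i32 %b
///   %cmp = icmp ugt i64 %mulval, 4294967295
/// becomes a call to llvm.umul.with.overflow.i32 on %a and %b, and %cmp is
/// replaced by the intrinsic's overflow bit.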
3635 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
3636 Value *OtherVal, InstCombiner &IC) {
3637 // Don't bother doing this transformation for pointers, don't do it for
3638 // vectors.
3639 if (!isa<IntegerType>(MulVal->getType()))
3642 assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
3643 assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
3644 auto *MulInstr = dyn_cast<Instruction>(MulVal);
3647 assert(MulInstr->getOpcode() == Instruction::Mul);
3649 auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
3650 *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
3651 assert(LHS->getOpcode() == Instruction::ZExt);
3652 assert(RHS->getOpcode() == Instruction::ZExt);
3653 Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
3655 // Calculate type and width of the result produced by mul.with.overflow.
3656 Type *TyA = A->getType(), *TyB = B->getType();
3657 unsigned WidthA = TyA->getPrimitiveSizeInBits(),
3658 WidthB = TyB->getPrimitiveSizeInBits();
3661 if (WidthB > WidthA) {
3669 // In order to replace the original mul with a narrower mul.with.overflow,
3670 // all uses must ignore upper bits of the product. The number of used low
3671 // bits must not be greater than the width of mul.with.overflow.
3672 if (MulVal->hasNUsesOrMore(2))
3673 for (User *U : MulVal->users()) {
3676 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
3677 // Check if truncation ignores bits above MulWidth.
3678 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
3679 if (TruncWidth > MulWidth)
3681 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
3682 // Check if AND ignores bits above MulWidth.
3683 if (BO->getOpcode() != Instruction::And)
3685 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
3686 const APInt &CVal = CI->getValue();
3687 if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
3690 // In this case we could have the operand of the binary operation
3691 // being defined in another block, and performing the replacement
3692 // could break the dominance relation.
3696 // Other uses prohibit this transformation.
3701 // Recognize patterns
3702 switch (I.getPredicate()) {
3703 case ICmpInst::ICMP_EQ:
3704 case ICmpInst::ICMP_NE:
3705 // Recognize pattern:
3706 // mulval = mul(zext A, zext B)
3707 // cmp eq/neq mulval, zext trunc mulval
3708 if (ZExtInst *Zext = dyn_cast<ZExtInst>(OtherVal))
3709 if (Zext->hasOneUse()) {
3710 Value *ZextArg = Zext->getOperand(0);
3711 if (TruncInst *Trunc = dyn_cast<TruncInst>(ZextArg))
3712 if (Trunc->getType()->getPrimitiveSizeInBits() == MulWidth)
3716 // Recognize pattern:
3717 // mulval = mul(zext A, zext B)
3718 // cmp eq/neq mulval, and(mulval, mask), mask selects low MulWidth bits.
3719 ConstantInt *CI;
3720 Value *ValToMask;
3721 if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
3722 if (ValToMask != MulVal)
3724 const APInt &CVal = CI->getValue() + 1;
3725 if (CVal.isPowerOf2()) {
3726 unsigned MaskWidth = CVal.logBase2();
3727 if (MaskWidth == MulWidth)
3728 break; // Recognized
3733 case ICmpInst::ICMP_UGT:
3734 // Recognize pattern:
3735 // mulval = mul(zext A, zext B)
3736 // cmp ugt mulval, max
3737 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3738 APInt MaxVal = APInt::getMaxValue(MulWidth);
3739 MaxVal = MaxVal.zext(CI->getBitWidth());
3740 if (MaxVal.eq(CI->getValue()))
3741 break; // Recognized
3745 case ICmpInst::ICMP_UGE:
3746 // Recognize pattern:
3747 // mulval = mul(zext A, zext B)
3748 // cmp uge mulval, max+1
3749 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3750 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
3751 if (MaxVal.eq(CI->getValue()))
3752 break; // Recognized
3756 case ICmpInst::ICMP_ULE:
3757 // Recognize pattern:
3758 // mulval = mul(zext A, zext B)
3759 // cmp ule mulval, max
3760 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3761 APInt MaxVal = APInt::getMaxValue(MulWidth);
3762 MaxVal = MaxVal.zext(CI->getBitWidth());
3763 if (MaxVal.eq(CI->getValue()))
3764 break; // Recognized
3768 case ICmpInst::ICMP_ULT:
3769 // Recognize pattern:
3770 // mulval = mul(zext A, zext B)
3771 // cmp ult mulval, max + 1
3772 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3773 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
3774 if (MaxVal.eq(CI->getValue()))
3775 break; // Recognized
3783 InstCombiner::BuilderTy &Builder = IC.Builder;
3784 Builder.SetInsertPoint(MulInstr);
3786 // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
3787 Value *MulA = A, *MulB = B;
3788 if (WidthA < MulWidth)
3789 MulA = Builder.CreateZExt(A, MulType);
3790 if (WidthB < MulWidth)
3791 MulB = Builder.CreateZExt(B, MulType);
3792 Value *F = Intrinsic::getDeclaration(I.getModule(),
3793 Intrinsic::umul_with_overflow, MulType);
3794 CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul");
3795 IC.Worklist.Add(MulInstr);
3797 // If there are uses of mul result other than the comparison, we know that
3798 // they are truncation or binary AND. Change them to use the result of
3799 // mul.with.overflow and properly adjust the mask/size.
3800 if (MulVal->hasNUsesOrMore(2)) {
3801 Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
3802 for (User *U : MulVal->users()) {
3803 if (U == &I || U == OtherVal)
3805 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
3806 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
3807 IC.replaceInstUsesWith(*TI, Mul);
3809 TI->setOperand(0, Mul);
3810 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
3811 assert(BO->getOpcode() == Instruction::And);
3812 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
3813 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
3814 APInt ShortMask = CI->getValue().trunc(MulWidth);
3815 Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
3816 Instruction *Zext =
3817 cast<Instruction>(Builder.CreateZExt(ShortAnd, BO->getType()));
3818 IC.Worklist.Add(Zext);
3819 IC.replaceInstUsesWith(*BO, Zext);
3821 llvm_unreachable("Unexpected Binary operation");
3823 IC.Worklist.Add(cast<Instruction>(U));
3826 if (isa<Instruction>(OtherVal))
3827 IC.Worklist.Add(cast<Instruction>(OtherVal));
3829 // The original icmp gets replaced with the overflow value, maybe inverted
3830 // depending on predicate.
3831 bool Inverse = false;
3832 switch (I.getPredicate()) {
3833 case ICmpInst::ICMP_NE:
3835 case ICmpInst::ICMP_EQ:
3838 case ICmpInst::ICMP_UGT:
3839 case ICmpInst::ICMP_UGE:
3840 if (I.getOperand(0) == MulVal)
3844 case ICmpInst::ICMP_ULT:
3845 case ICmpInst::ICMP_ULE:
3846 if (I.getOperand(1) == MulVal)
3851 llvm_unreachable("Unexpected predicate");
3852 }
3853 if (Inverse) {
3854 Value *Res = Builder.CreateExtractValue(Call, 1);
3855 return BinaryOperator::CreateNot(Res);
3858 return ExtractValueInst::Create(Call, 1);
3861 /// When performing a comparison against a constant, it is possible that not all
3862 /// the bits in the LHS are demanded. This helper method computes the mask that
3863 /// is demanded.
3864 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
3865 const APInt *RHS;
3866 if (!match(I.getOperand(1), m_APInt(RHS)))
3867 return APInt::getAllOnesValue(BitWidth);
3869 // If this is a normal comparison, it demands all bits. If it is a sign bit
3870 // comparison, it only demands the sign bit.
3871 bool UnusedBit;
3872 if (isSignBitCheck(I.getPredicate(), *RHS, UnusedBit))
3873 return APInt::getSignMask(BitWidth);
3875 switch (I.getPredicate()) {
3876 // For a UGT comparison, we don't care about any bits that
3877 // correspond to the trailing ones of the comparand. The value of these
3878 // bits doesn't impact the outcome of the comparison, because any value
3879 // greater than the RHS must differ in a bit higher than these due to carry.
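// For example, any X satisfying "icmp ugt X, 7" must already differ from 7
// somewhere above bit 2, so the low three bits of X are not demanded.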
3880 case ICmpInst::ICMP_UGT:
3881 return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingOnes());
3883 // Similarly, for a ULT comparison, we don't care about the trailing zeros.
3884 // Any value less than the RHS must differ in a higher bit because of carries.
3885 case ICmpInst::ICMP_ULT:
3886 return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingZeros());
3888 default:
3889 return APInt::getAllOnesValue(BitWidth);
3893 /// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst
3894 /// should be swapped.
3895 /// The decision is based on how many times these two operands are reused
3896 /// as subtract operands and their positions in those instructions.
3897 /// The rationale is that several architectures use the same instruction for
3898 /// both subtract and cmp, thus it is better if the order of those operands
3899 /// matches.
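/// For example, if the function already contains "sub %b, %a", then for
/// "icmp eq %a, %b" it is preferable to emit "icmp eq %b, %a", so the
/// compare and the subtract agree on operand order.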
3900 /// \return true if Op0 and Op1 should be swapped.
3901 static bool swapMayExposeCSEOpportunities(const Value * Op0,
3902 const Value * Op1) {
3903 // Filter out pointer values, as those cannot appear directly in a subtract.
3904 // FIXME: we may want to go through inttoptrs or bitcasts.
3905 if (Op0->getType()->isPointerTy())
3907 // Count every use of both Op0 and Op1 in a subtract.
3908 // Each time Op0 is the first operand, count -1: swapping is bad, the
3909 // subtract already has the same layout as the compare.
3910 // Each time Op0 is the second operand, count +1: swapping is good, the
3911 // subtract has a different layout than the compare.
3912 // At the end, if the benefit is greater than 0, Op0 should come second to
3913 // expose more CSE opportunities.
3914 int GlobalSwapBenefits = 0;
3915 for (const User *U : Op0->users()) {
3916 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(U);
3917 if (!BinOp || BinOp->getOpcode() != Instruction::Sub)
3919 // If Op0 is the first argument, it is not beneficial to swap the
3920 // arguments.
3921 int LocalSwapBenefits = -1;
3922 unsigned Op1Idx = 1;
3923 if (BinOp->getOperand(Op1Idx) == Op0) {
3924 Op1Idx = 0;
3925 LocalSwapBenefits = 1;
3927 if (BinOp->getOperand(Op1Idx) != Op1)
3929 GlobalSwapBenefits += LocalSwapBenefits;
3931 return GlobalSwapBenefits > 0;
3934 /// \brief Check that one use is in the same block as the definition and all
3935 /// other uses are in blocks dominated by a given block.
3937 /// \param DI Definition
3939 /// \param DB Block that must dominate all uses of \p DI outside
3940 /// the parent block
3941 /// \return true when \p UI is the only use of \p DI in the parent block
3942 /// and all other uses of \p DI are in blocks dominated by \p DB.
3944 bool InstCombiner::dominatesAllUses(const Instruction *DI,
3945 const Instruction *UI,
3946 const BasicBlock *DB) const {
3947 assert(DI && UI && "Instruction not defined\n");
3948 // Ignore incomplete definitions.
3949 if (!DI->getParent())
3951 // DI and UI must be in the same block.
3952 if (DI->getParent() != UI->getParent())
3954 // Protect from self-referencing blocks.
3955 if (DI->getParent() == DB)
3957 for (const User *U : DI->users()) {
3958 auto *Usr = cast<Instruction>(U);
3959 if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
3965 /// Return true when the instruction sequence within a block is select-cmp-br.
3966 static bool isChainSelectCmpBranch(const SelectInst *SI) {
3967 const BasicBlock *BB = SI->getParent();
3970 auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
3971 if (!BI || BI->getNumSuccessors() != 2)
3973 auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
3974 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
3979 /// \brief True when a select result is replaced by one of its operands
3980 /// in select-icmp sequence. This will eventually result in the elimination
3981 /// of the select.
3983 /// \param SI Select instruction
3984 /// \param Icmp Compare instruction
3985 /// \param SIOpd Operand that replaces the select
3988 /// - The replacement is global and requires dominator information
3989 /// - The caller is responsible for the actual replacement
3994 /// %4 = select i1 %3, %C* %0, %C* null
3995 /// %5 = icmp eq %C* %4, null
3996 /// br i1 %5, label %9, label %7
3998 /// ; <label>:7 ; preds = %entry
3999 /// %8 = getelementptr inbounds %C* %4, i64 0, i32 0
4002 /// can be transformed to
4004 /// %5 = icmp eq %C* %0, null
4005 /// %6 = select i1 %3, i1 %5, i1 true
4006 /// br i1 %6, label %9, label %7
4008 /// ; <label>:7 ; preds = %entry
4009 /// %8 = getelementptr inbounds %C* %0, i64 0, i32 0 // replace by %0!
4011 /// Similar when the first operand of the select is a constant and/or
4012 /// the compare is for not equal rather than equal.
4014 /// NOTE: The function is only called when the select and compare constants
4015 /// are equal, so the optimization can work only for EQ predicates. This is not a
4016 /// major restriction since a NE compare should be 'normalized' to an equal
4017 /// compare, which usually happens in the combiner and test case
4018 /// select-cmp-br.ll checks for it.
4019 bool InstCombiner::replacedSelectWithOperand(SelectInst *SI,
4020 const ICmpInst *Icmp,
4021 const unsigned SIOpd) {
4022 assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
4023 if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
4024 BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
4025 // The check for the single predecessor is not the best that can be
4026 // done. But it protects efficiently against cases like when SI's
4027 // home block has two successors, Succ and Succ1, and Succ1 is a
4028 // predecessor of Succ. Then SI can't be replaced by SIOpd because the use
4029 // that gets replaced can be reached on either path. So the uniqueness check
4030 // guarantees that the path all uses of SI (outside SI's parent) are on
4031 // is disjoint from all other paths out of SI. But that information
4032 // is more expensive to compute, and the trade-off here is in favor
4033 // of compile-time. It should also be noticed that we check for a single
4034 // predecessor and not just uniqueness. This handles the situation when
4035 // Succ and Succ1 point to the same basic block.
4036 if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
4037 NumSel++;
4038 SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
4039 return true;
4045 /// Try to fold the comparison based on range information we can get by checking
4046 /// whether bits are known to be zero or one in the inputs.
4047 Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
4048 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4049 Type *Ty = Op0->getType();
4050 ICmpInst::Predicate Pred = I.getPredicate();
4052 // Get scalar or pointer size.
4053 unsigned BitWidth = Ty->isIntOrIntVectorTy()
4054 ? Ty->getScalarSizeInBits()
4055 : DL.getTypeSizeInBits(Ty->getScalarType());
4060 KnownBits Op0Known(BitWidth);
4061 KnownBits Op1Known(BitWidth);
4063 if (SimplifyDemandedBits(&I, 0,
4064 getDemandedBitsLHSMask(I, BitWidth),
4065 Op0Known, 0))
4066 return &I;
4068 if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth),
4069 Op1Known, 0))
4070 return &I;
4072 // Given the known and unknown bits, compute a range that the LHS could be
4073 // in. Compute the Min, Max and RHS values based on the known bits. For the
4074 // EQ and NE we use unsigned values.
4075 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
4076 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
4077 if (I.isSigned()) {
4078 computeSignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
4079 computeSignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
4080 } else {
4081 computeUnsignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
4082 computeUnsignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
4083 }
4085 // If Min and Max are known to be the same, then SimplifyDemandedBits
4086 // figured out that the LHS is a constant. Constant fold this now, so that
4087 // code below can assume that Min != Max.
4088 if (!isa<Constant>(Op0) && Op0Min == Op0Max)
4089 return new ICmpInst(Pred, ConstantInt::get(Op0->getType(), Op0Min), Op1);
4090 if (!isa<Constant>(Op1) && Op1Min == Op1Max)
4091 return new ICmpInst(Pred, Op0, ConstantInt::get(Op1->getType(), Op1Min));
4093 // Based on the range information we know about the LHS, see if we can
4094 // simplify this comparison. For example, (x&4) < 8 is always true.
4095 switch (Pred) {
4096 default:
4097 llvm_unreachable("Unknown icmp opcode!");
4098 case ICmpInst::ICMP_EQ:
4099 case ICmpInst::ICMP_NE: {
4100 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) {
4101 return Pred == CmpInst::ICMP_EQ
4102 ? replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()))
4103 : replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4106 // If all bits are known zero except for one, then we know at most one bit
4107 // is set. If the comparison is against zero, then this is a check to see if
4108 // *that* bit is set.
4109 APInt Op0KnownZeroInverted = ~Op0Known.Zero;
4110 if (Op1Known.isZero()) {
4111 // If the LHS is an AND with the same constant, look through it.
4112 Value *LHS = nullptr;
4113 const APInt *LHSC;
4114 if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
4115 *LHSC != Op0KnownZeroInverted)
4116 LHS = Op0;
4118 Value *X;
4119 if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
4120 APInt ValToCheck = Op0KnownZeroInverted;
4121 Type *XTy = X->getType();
4122 if (ValToCheck.isPowerOf2()) {
4123 // ((1 << X) & 8) == 0 -> X != 3
4124 // ((1 << X) & 8) != 0 -> X == 3
4125 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
4126 auto NewPred = ICmpInst::getInversePredicate(Pred);
4127 return new ICmpInst(NewPred, X, CmpC);
4128 } else if ((++ValToCheck).isPowerOf2()) {
4129 // ((1 << X) & 7) == 0 -> X >= 3
4130 // ((1 << X) & 7) != 0 -> X < 3
4131 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
4133 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
4134 return new ICmpInst(NewPred, X, CmpC);
4138 // Check if the LHS is 8 >>u x and the result is a power of 2 like 1.
4139 const APInt *CI;
4140 if (Op0KnownZeroInverted.isOneValue() &&
4141 match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
4142 // ((8 >>u X) & 1) == 0 -> X != 3
4143 // ((8 >>u X) & 1) != 0 -> X == 3
4144 unsigned CmpVal = CI->countTrailingZeros();
4145 auto NewPred = ICmpInst::getInversePredicate(Pred);
4146 return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
4151 case ICmpInst::ICMP_ULT: {
4152 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
4153 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4154 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
4155 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4156 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
4157 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4160 if (match(Op1, m_APInt(CmpC))) {
4161 // A <u C -> A == C-1 if min(A)+1 == C
4162 if (*CmpC == Op0Min + 1)
4163 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4164 ConstantInt::get(Op1->getType(), *CmpC - 1));
4165 // X <u C --> X == 0, if the number of zero bits in the bottom of X
4166 // exceeds the log2 of C.
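// For example, if the low three bits of X are known zero, "X u< 8" can
// only hold for X == 0, since any other such X is a nonzero multiple of 8.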
4167 if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2())
4168 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4169 Constant::getNullValue(Op1->getType()));
4173 case ICmpInst::ICMP_UGT: {
4174 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
4175 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4176 if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
4177 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4178 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
4179 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4182 if (match(Op1, m_APInt(CmpC))) {
4183 // A >u C -> A == C+1 if max(A)-1 == C
4184 if (*CmpC == Op0Max - 1)
4185 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4186 ConstantInt::get(Op1->getType(), *CmpC + 1));
4187 // X >u C --> X != 0, if the number of zero bits in the bottom of X
4188 // exceeds the log2 of C.
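// For example, if the low three bits of X are known zero, "X u> 7" holds
// exactly when X != 0, since any nonzero such X is at least 8.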
4189 if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits())
4190 return new ICmpInst(ICmpInst::ICMP_NE, Op0,
4191 Constant::getNullValue(Op1->getType()));
4195 case ICmpInst::ICMP_SLT: {
4196 if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
4197 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4198 if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
4199 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4200 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
4201 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4203 if (match(Op1, m_APInt(CmpC))) {
4204 if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
4205 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4206 ConstantInt::get(Op1->getType(), *CmpC - 1));
4210 case ICmpInst::ICMP_SGT: {
4211 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
4212 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4213 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
4214 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4215 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
4216 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4218 if (match(Op1, m_APInt(CmpC))) {
4219 if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
4220 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4221 ConstantInt::get(Op1->getType(), *CmpC + 1));
4225 case ICmpInst::ICMP_SGE:
4226 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
4227 if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
4228 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4229 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
4230 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4231 if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B)
4232 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
4234 case ICmpInst::ICMP_SLE:
4235 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
4236 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
4237 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4238 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
4239 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4240 if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B)
4241 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
4243 case ICmpInst::ICMP_UGE:
4244 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
4245 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
4246 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4247 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
4248 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4249 if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B)
4250 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
4252 case ICmpInst::ICMP_ULE:
4253 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
4254 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
4255 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4256 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
4257 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4258 if (Op1Max == Op0Min) // A <=u B -> A == B if min(A) == max(B)
4259 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
4263 // Turn a signed comparison into an unsigned one if both operands are known to
4264 // have the same sign.
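// For example, if both operands are known non-negative, "icmp slt A, B"
// and "icmp ult A, B" are equivalent.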
4265 if (I.isSigned() &&
4266 ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
4267 (Op0Known.One.isNegative() && Op1Known.One.isNegative())))
4268 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
4273 /// If we have an icmp le or icmp ge instruction with a constant operand, turn
4274 /// it into the appropriate icmp lt or icmp gt instruction. This transform
4275 /// allows them to be folded in visitICmpInst.
4276 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
4277 ICmpInst::Predicate Pred = I.getPredicate();
4278 if (Pred != ICmpInst::ICMP_SLE && Pred != ICmpInst::ICMP_SGE &&
4279 Pred != ICmpInst::ICMP_ULE && Pred != ICmpInst::ICMP_UGE)
4282 Value *Op0 = I.getOperand(0);
4283 Value *Op1 = I.getOperand(1);
4284 auto *Op1C = dyn_cast<Constant>(Op1);
4288 // Check if the constant operand can be safely incremented/decremented without
4289 // overflowing/underflowing. For scalars, SimplifyICmpInst has already handled
4290 // the edge cases for us, so we just assert on them. For vectors, we must
4291 // handle the edge cases.
4292 Type *Op1Type = Op1->getType();
4293 bool IsSigned = I.isSigned();
4294 bool IsLE = (Pred == ICmpInst::ICMP_SLE || Pred == ICmpInst::ICMP_ULE);
4295 auto *CI = dyn_cast<ConstantInt>(Op1C);
4297 // A <= MAX -> TRUE ; A >= MIN -> TRUE
4298 assert(IsLE ? !CI->isMaxValue(IsSigned) : !CI->isMinValue(IsSigned));
4299 } else if (Op1Type->isVectorTy()) {
4300 // TODO? If the edge cases for vectors were guaranteed to be handled as they
4301 // are for scalar, we could remove the min/max checks. However, to do that,
4302 // we would have to use insertelement/shufflevector to replace edge values.
4303 unsigned NumElts = Op1Type->getVectorNumElements();
4304 for (unsigned i = 0; i != NumElts; ++i) {
4305 Constant *Elt = Op1C->getAggregateElement(i);
4309 if (isa<UndefValue>(Elt))
4312 // Bail out if we can't determine if this constant is min/max or if we
4313 // know that this constant is min/max.
4314 auto *CI = dyn_cast<ConstantInt>(Elt);
4315 if (!CI || (IsLE ? CI->isMaxValue(IsSigned) : CI->isMinValue(IsSigned)))
4323 // Increment or decrement the constant and set the new comparison predicate:
4324 // ULE -> ULT ; UGE -> UGT ; SLE -> SLT ; SGE -> SGT
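// For example, "icmp sle X, 5" becomes "icmp slt X, 6" and "icmp uge X, 5"
// becomes "icmp ugt X, 4".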
4325 Constant *OneOrNegOne = ConstantInt::get(Op1Type, IsLE ? 1 : -1, true);
4326 CmpInst::Predicate NewPred = IsLE ? ICmpInst::ICMP_ULT: ICmpInst::ICMP_UGT;
4327 NewPred = IsSigned ? ICmpInst::getSignedPredicate(NewPred) : NewPred;
4328 return new ICmpInst(NewPred, Op0, ConstantExpr::getAdd(Op1C, OneOrNegOne));
4331 /// Integer compare with boolean values can always be turned into bitwise ops.
4332 static Instruction *canonicalizeICmpBool(ICmpInst &I,
4333 InstCombiner::BuilderTy &Builder) {
4334 Value *A = I.getOperand(0), *B = I.getOperand(1);
4335 assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");
4337 // A boolean compared to true/false can be simplified to Op0/true/false in
4338 // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
4339 // Cases not handled by InstSimplify are always 'not' of Op0.
4340 if (match(B, m_Zero())) {
4341 switch (I.getPredicate()) {
4342 case CmpInst::ICMP_EQ: // A == 0 -> !A
4343 case CmpInst::ICMP_ULE: // A <=u 0 -> !A
4344 case CmpInst::ICMP_SGE: // A >=s 0 -> !A
4345 return BinaryOperator::CreateNot(A);
4346 default:
4347 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
4349 } else if (match(B, m_One())) {
4350 switch (I.getPredicate()) {
4351 case CmpInst::ICMP_NE: // A != 1 -> !A
4352 case CmpInst::ICMP_ULT: // A <u 1 -> !A
4353 case CmpInst::ICMP_SGT: // A >s -1 -> !A
4354 return BinaryOperator::CreateNot(A);
4355 default:
4356 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
4360 switch (I.getPredicate()) {
4361 default:
4362 llvm_unreachable("Invalid icmp instruction!");
4363 case ICmpInst::ICMP_EQ:
4364 // icmp eq i1 A, B -> ~(A ^ B)
4365 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
4367 case ICmpInst::ICMP_NE:
4368 // icmp ne i1 A, B -> A ^ B
4369 return BinaryOperator::CreateXor(A, B);
4371 case ICmpInst::ICMP_UGT:
4372 // icmp ugt -> icmp ult
4373 std::swap(A, B);
4374 LLVM_FALLTHROUGH;
4375 case ICmpInst::ICMP_ULT:
4376 // icmp ult i1 A, B -> ~A & B
4377 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
4379 case ICmpInst::ICMP_SGT:
4380 // icmp sgt -> icmp slt
4381 std::swap(A, B);
4382 LLVM_FALLTHROUGH;
4383 case ICmpInst::ICMP_SLT:
4384 // icmp slt i1 A, B -> A & ~B
4385 return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
4387 case ICmpInst::ICMP_UGE:
4388 // icmp uge -> icmp ule
4389 std::swap(A, B);
4390 LLVM_FALLTHROUGH;
4391 case ICmpInst::ICMP_ULE:
4392 // icmp ule i1 A, B -> ~A | B
4393 return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
4395 case ICmpInst::ICMP_SGE:
4396 // icmp sge -> icmp sle
4397 std::swap(A, B);
4398 LLVM_FALLTHROUGH;
4399 case ICmpInst::ICMP_SLE:
4400 // icmp sle i1 A, B -> A | ~B
4401 return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
4405 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
4406 bool Changed = false;
4407 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4408 unsigned Op0Cplxity = getComplexity(Op0);
4409 unsigned Op1Cplxity = getComplexity(Op1);
4411 /// Orders the operands of the compare so that they are listed from most
4412 /// complex to least complex. This puts constants before unary operators,
4413 /// before binary operators.
4414 if (Op0Cplxity < Op1Cplxity ||
4415 (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) {
4416 I.swapOperands();
4417 std::swap(Op0, Op1);
4418 Changed = true;
4421 if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1,
4422 SQ.getWithInstruction(&I)))
4423 return replaceInstUsesWith(I, V);
4425 // Comparing -val or val with non-zero is the same as just comparing val
4426 // ie, abs(val) != 0 -> val != 0
4427 if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
4428 Value *Cond, *SelectTrue, *SelectFalse;
4429 if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
4430 m_Value(SelectFalse)))) {
4431 if (Value *V = dyn_castNegVal(SelectTrue)) {
4432 if (V == SelectFalse)
4433 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
4435 else if (Value *V = dyn_castNegVal(SelectFalse)) {
4436 if (V == SelectTrue)
4437 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
4442 if (Op0->getType()->isIntOrIntVectorTy(1))
4443 if (Instruction *Res = canonicalizeICmpBool(I, Builder))
4446 if (ICmpInst *NewICmp = canonicalizeCmpWithConstant(I))
4449 if (Instruction *Res = foldICmpWithConstant(I))
4452 if (Instruction *Res = foldICmpUsingKnownBits(I))
4455 // Test if the ICmpInst instruction is used exclusively by a select as
4456 // part of a minimum or maximum operation. If so, refrain from doing
4457 // any other folding. This helps out other analyses which understand
4458 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
4459 // and CodeGen. And in this case, at least one of the comparison
4460 // operands has at least one user besides the compare (the select),
4461 // which would often largely negate the benefit of folding anyway.
4463 // Do the same for the other patterns recognized by matchSelectPattern.
4465 if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
4467 SelectPatternResult SPR = matchSelectPattern(SI, A, B);
4468 if (SPR.Flavor != SPF_UNKNOWN)
4472 // Do this after checking for min/max to prevent infinite looping.
4473 if (Instruction *Res = foldICmpWithZero(I))
4476 // FIXME: We only do this after checking for min/max to prevent infinite
4477 // looping caused by a reverse canonicalization of these patterns for min/max.
4478 // FIXME: The organization of folds is a mess. These would naturally go into
4479 // canonicalizeCmpWithConstant(), but we can't move all of the above folds
4480 // down here after the min/max restriction.
4481 ICmpInst::Predicate Pred = I.getPredicate();
4482 const APInt *C;
4483 if (match(Op1, m_APInt(C))) {
4484 // For i32: x >u 2147483647 -> x <s 0 -> true if sign bit set
4485 if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
4486 Constant *Zero = Constant::getNullValue(Op0->getType());
4487 return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
4490 // For i32: x <u 2147483648 -> x >s -1 -> true if sign bit clear
4491 if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
4492 Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
4493 return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
4497 if (Instruction *Res = foldICmpInstWithConstant(I))
4500 if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
4503 // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
4504 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
4505 if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
4507 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
4508 if (Instruction *NI = foldGEPICmp(GEP, Op0,
4509 ICmpInst::getSwappedPredicate(I.getPredicate()), I))
4512 // Try to optimize equality comparisons against alloca-based pointers.
4513 if (Op0->getType()->isPointerTy() && I.isEquality()) {
4514 assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
4515 if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op0, DL)))
4516 if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
4518 if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op1, DL)))
4519 if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
4523 // Test to see if the operands of the icmp are casted versions of other
4524 // values. If the ptr->ptr cast can be stripped off both arguments, we do so
4525 // now.
4526 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
4527 if (Op0->getType()->isPointerTy() &&
4528 (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
4529 // We keep moving the cast from the left operand over to the right
4530 // operand, where it can often be eliminated completely.
4531 Op0 = CI->getOperand(0);
4533 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast,
4534 // so eliminate it as well.
4535 if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
4536 Op1 = CI2->getOperand(0);
4538 // If Op1 is a constant, we can fold the cast into the constant.
4539 if (Op0->getType() != Op1->getType()) {
4540 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
4541 Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
4543 // Otherwise, cast the RHS right before the icmp
4544 Op1 = Builder.CreateBitCast(Op1, Op0->getType());
4547 return new ICmpInst(I.getPredicate(), Op0, Op1);
4551 if (isa<CastInst>(Op0)) {
4552 // Handle the special case of: icmp (cast bool to X), <cst>
4553 // This comes up when you have code like
4554 //   int X = A < B;
4555 //   if (X) ...
4556 // For generality, we handle any zero-extension of any operand comparison
4557 // with a constant or another cast from the same type.
4558 if (isa<Constant>(Op1) || isa<CastInst>(Op1))
4559 if (Instruction *R = foldICmpWithCastAndCast(I))
4563 if (Instruction *Res = foldICmpBinOp(I))
4566 if (Instruction *Res = foldICmpWithMinMax(I))
4571 // Transform (A & ~B) == 0 --> (A & B) != 0
4572 // and (A & ~B) != 0 --> (A & B) == 0
4573 // if A is a power of 2.
4574 if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
4575 match(Op1, m_Zero()) &&
4576 isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
4577 return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
4578 Op1);
4580 // ~X < ~Y --> Y < X
4581 // ~X < C --> X > ~C
4582 if (match(Op0, m_Not(m_Value(A)))) {
4583 if (match(Op1, m_Not(m_Value(B))))
4584 return new ICmpInst(I.getPredicate(), B, A);
4587 if (match(Op1, m_APInt(C)))
4588 return new ICmpInst(I.getSwappedPredicate(), A,
4589 ConstantInt::get(Op1->getType(), ~(*C)));
4592 Instruction *AddI = nullptr;
4593 if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
4594 m_Instruction(AddI))) &&
4595 isa<IntegerType>(A->getType())) {
4596 Value *Result;
4597 Constant *Overflow;
4598 if (OptimizeOverflowCheck(OCF_UNSIGNED_ADD, A, B, *AddI, Result,
4599 Overflow)) {
4600 replaceInstUsesWith(*AddI, Result);
4601 return replaceInstUsesWith(I, Overflow);
4605 // (zext a) * (zext b) --> llvm.umul.with.overflow.
4606 if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
4607 if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
4610 if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
4611 if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
4616 if (Instruction *Res = foldICmpEquality(I))
4619 // The 'cmpxchg' instruction returns an aggregate containing the old value and
4620 // an i1 which indicates whether or not we successfully did the swap.
4622 // Replace comparisons between the old value and the expected value with the
4623 // indicator that 'cmpxchg' returns.
4625 // N.B. This transform is only valid when the 'cmpxchg' is not permitted to
4626 // spuriously fail. In those cases, the old value may equal the expected
4627 // value but it is possible for the swap to not occur.
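//
// For example (an illustrative sketch with a strong cmpxchg):
//   %pair = cmpxchg i32* %p, i32 %expected, i32 %new seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = icmp eq i32 %old, %expected
// Here %ok can be replaced by "extractvalue { i32, i1 } %pair, 1".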
4628 if (I.getPredicate() == ICmpInst::ICMP_EQ)
4629 if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
4630 if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
4631 if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
4632 !ACXI->isWeak())
4633 return ExtractValueInst::Create(ACXI, 1);
4636 Value *X; ConstantInt *Cst;
4637 // icmp X+Cst, X
4638 if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
4639 return foldICmpAddOpConst(X, Cst, I.getPredicate());
4641 // icmp X, X+Cst
4642 if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
4643 return foldICmpAddOpConst(X, Cst, I.getSwappedPredicate());
4645 return Changed ? &I : nullptr;
4648 /// Fold fcmp ([us]itofp x, cst) if possible.
4649 Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
4651 if (!isa<ConstantFP>(RHSC)) return nullptr;
4652 const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
4654 // Get the width of the mantissa. We don't want to hack on conversions that
4655 // might lose information from the integer, e.g. "i64 -> float"
4656 int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
4657 if (MantissaWidth == -1) return nullptr; // Unknown.
4659 IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
4661 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
4663 if (I.isEquality()) {
4664 FCmpInst::Predicate P = I.getPredicate();
4665 bool IsExact = false;
4666 APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
4667 RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
4669 // If the floating point constant isn't an integer value, we know we will
4670 // never compare equal to it, so eq folds to false and ne folds to true.
4672 // TODO: Can never be -0.0 and other non-representable values
4673 APFloat RHSRoundInt(RHS);
4674 RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
4675 if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
4676 if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
4677 return replaceInstUsesWith(I, Builder.getFalse());
4679 assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
4680 return replaceInstUsesWith(I, Builder.getTrue());
4684 // TODO: If the constant is exactly representable, is it always OK to do
4685 // equality compares as integer?
4688 // Check to see that the input is converted from an integer type that is
4689 // small enough to preserve all bits. TODO: check here for "known" sign bits.
4690 // This would allow us to handle (fptosi (x >>s 62) to float) e.g. if x is i64.
4691 unsigned InputSize = IntTy->getScalarSizeInBits();
4693 // The following test does NOT adjust InputSize downwards for signed inputs,
4694 // because the most negative value still requires all the mantissa bits
4695 // to distinguish it from one less than that value.
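// For example, a float's 24-bit significand cannot distinguish every i32:
// (float)16777217 == (float)16777216, i.e. 2^24 + 1 rounds to 2^24.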
4696 if ((int)InputSize > MantissaWidth) {
4697 // Conversion would lose accuracy. Check if loss can impact comparison.
4698 int Exp = ilogb(RHS);
4699 if (Exp == APFloat::IEK_Inf) {
4700 int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
4701 if (MaxExponent < (int)InputSize - !LHSUnsigned)
4702 // Conversion could create infinity.
4705 // Note that if RHS is zero or NaN, then Exp is negative
4706 // and the first condition is trivially false.
4707 if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
4708 // Conversion could affect comparison.
4713 // Otherwise, we can potentially simplify the comparison. We know that it
4714 // will always come through as an integer value and we know the constant is
4715 // not a NAN (it would have been previously simplified).
4716 assert(!RHS.isNaN() && "NaN comparison not already folded!");
4718 ICmpInst::Predicate Pred;
4719 switch (I.getPredicate()) {
4720 default: llvm_unreachable("Unexpected predicate!");
4721 case FCmpInst::FCMP_UEQ:
4722 case FCmpInst::FCMP_OEQ:
4723 Pred = ICmpInst::ICMP_EQ;
4725 case FCmpInst::FCMP_UGT:
4726 case FCmpInst::FCMP_OGT:
4727 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
4729 case FCmpInst::FCMP_UGE:
4730 case FCmpInst::FCMP_OGE:
4731 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
4733 case FCmpInst::FCMP_ULT:
4734 case FCmpInst::FCMP_OLT:
4735 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
4737 case FCmpInst::FCMP_ULE:
4738 case FCmpInst::FCMP_OLE:
4739 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
4741 case FCmpInst::FCMP_UNE:
4742 case FCmpInst::FCMP_ONE:
4743 Pred = ICmpInst::ICMP_NE;
4745 case FCmpInst::FCMP_ORD:
4746 return replaceInstUsesWith(I, Builder.getTrue());
4747 case FCmpInst::FCMP_UNO:
4748 return replaceInstUsesWith(I, Builder.getFalse());
  // Now we know that the APFloat is a normal number, zero or inf.

  // See if the FP constant is too large for the integer. For example,
  // comparing an i8 to 300.0.
  unsigned IntWidth = IntTy->getScalarSizeInBits();

  if (!LHSUnsigned) {
    // If the RHS value is > SignedMax, fold the comparison. This handles +INF
    // and large values.
    APFloat SMax(RHS.getSemantics());
    SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
          Pred == ICmpInst::ICMP_SLE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  } else {
    // If the RHS value is > UnsignedMax, fold the comparison. This handles
    // +INF and large values.
    APFloat UMax(RHS.getSemantics());
    UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
          Pred == ICmpInst::ICMP_ULE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  }

  if (!LHSUnsigned) {
    // See if the RHS value is < SignedMin.
    APFloat SMin(RHS.getSemantics());
    SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
          Pred == ICmpInst::ICMP_SGE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  } else {
    // See if the RHS value is < UnsignedMin (i.e. negative).
    APFloat UMin(RHS.getSemantics());
    UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
          Pred == ICmpInst::ICMP_UGE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  }
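  // For example, with an i8 LHS, 'fcmp olt (sitofp i8 %x), 300.0' folds to
  // true because SMax = 127 < 300, and 'fcmp ogt (sitofp i8 %x), -129.0'
  // folds to true because SMin = -128 > -129.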
  // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
  // [0, UMAX], but it may still be fractional. See if it is fractional by
  // casting the FP value to the integer value and back, checking for equality.
  // Don't do this for zero, because -0.0 is not fractional.
  Constant *RHSInt = LHSUnsigned
    ? ConstantExpr::getFPToUI(RHSC, IntTy)
    : ConstantExpr::getFPToSI(RHSC, IntTy);
  if (!RHS.isZero()) {
    bool Equal = LHSUnsigned
      ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
      : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
    if (!Equal) {
      // If we had a comparison against a fractional value, we have to adjust
      // the compare predicate and sometimes the value. RHSC is rounded towards
      // zero at this point.
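      // For example, 4.4 truncates to 4, so '(float)int < 4.4' must become
      // 'int <= 4' rather than 'int < 4'; likewise -4.4 truncates to -4, so
      // '(float)int <= -4.4' becomes 'int < -4'.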
      switch (Pred) {
      default: llvm_unreachable("Unexpected integer comparison!");
      case ICmpInst::ICMP_NE:  // (float)int != 4.4   --> true
        return replaceInstUsesWith(I, Builder.getTrue());
      case ICmpInst::ICMP_EQ:  // (float)int == 4.4   --> false
        return replaceInstUsesWith(I, Builder.getFalse());
      case ICmpInst::ICMP_ULE:
        // (float)int <= 4.4   --> int <= 4
        // (float)int <= -4.4  --> false
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getFalse());
        break;
      case ICmpInst::ICMP_SLE:
        // (float)int <= 4.4   --> int <= 4
        // (float)int <= -4.4  --> int < -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SLT;
        break;
      case ICmpInst::ICMP_ULT:
        // (float)int < -4.4   --> false
        // (float)int < 4.4    --> int <= 4
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getFalse());
        Pred = ICmpInst::ICMP_ULE;
        break;
      case ICmpInst::ICMP_SLT:
        // (float)int < -4.4   --> int < -4
        // (float)int < 4.4    --> int <= 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SLE;
        break;
      case ICmpInst::ICMP_UGT:
        // (float)int > 4.4    --> int > 4
        // (float)int > -4.4   --> true
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getTrue());
        break;
      case ICmpInst::ICMP_SGT:
        // (float)int > 4.4    --> int > 4
        // (float)int > -4.4   --> int >= -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SGE;
        break;
      case ICmpInst::ICMP_UGE:
        // (float)int >= -4.4  --> true
        // (float)int >= 4.4   --> int > 4
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getTrue());
        Pred = ICmpInst::ICMP_UGT;
        break;
      case ICmpInst::ICMP_SGE:
        // (float)int >= -4.4  --> int >= -4
        // (float)int >= 4.4   --> int > 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SGT;
        break;
      }
    }
  }

  // Lower this FP comparison into an appropriate integer version of the
  // comparison.
  return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
}

Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
  bool Changed = false;

  /// Orders the operands of the compare so that they are listed from most
  /// complex to least complex. This puts binary operators before unary
  /// operators, and those before constants, moving constants to the RHS.
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
    I.swapOperands();
    Changed = true;
  }
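  // For example, 'fcmp olt 1.0, %x' becomes 'fcmp ogt %x, 1.0' (swapOperands
  // also swaps the predicate, so the result is preserved).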
  const CmpInst::Predicate Pred = I.getPredicate();
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (Value *V = SimplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);
  // Simplify 'fcmp pred X, X'.
  if (Op0 == Op1) {
    switch (Pred) {
    default: break;
    case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
    case FCmpInst::FCMP_ULT:    // True if unordered or less than
    case FCmpInst::FCMP_UGT:    // True if unordered or greater than
    case FCmpInst::FCMP_UNE:    // True if unordered or not equal
      // Canonicalize these to be 'fcmp uno %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_UNO);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;

    case FCmpInst::FCMP_ORD:    // True if ordered (no nans)
    case FCmpInst::FCMP_OEQ:    // True if ordered and equal
    case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
    case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
      // Canonicalize these to be 'fcmp ord %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_ORD);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;
    }
  }
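  // For example, 'fcmp ult %X, %X' is true exactly when %X is NaN, which is
  // what 'fcmp uno %X, 0.0' computes.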
  // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
  // then canonicalize the operand to 0.0.
  if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    if (!match(Op0, m_Zero()) && isKnownNeverNaN(Op0)) {
      I.setOperand(0, ConstantFP::getNullValue(Op0->getType()));
      return &I;
    }
    if (!match(Op1, m_Zero()) && isKnownNeverNaN(Op1)) {
      I.setOperand(1, ConstantFP::getNullValue(Op0->getType()));
      return &I;
    }
  }
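  // For example, 'fcmp uno %x, 5.0' -> 'fcmp uno %x, 0.0': since 5.0 is never
  // NaN, the result depends only on %x.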
  // Test if the FCmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  // and CodeGen. And in this case, at least one of the comparison
  // operands has at least one user besides the compare (the select),
  // which would often largely negate the benefit of folding anyway.
  if (I.hasOneUse())
    if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
      Value *A, *B;
      SelectPatternResult SPR = matchSelectPattern(SI, A, B);
      if (SPR.Flavor != SPF_UNKNOWN)
        return nullptr;
    }
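  // For example, this float-min idiom is left for other passes to recognize:
  //   %cmp = fcmp olt float %a, %b
  //   %min = select i1 %cmp, float %a, float %b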
  // Handle fcmp with constant RHS.
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::FPExt: {
        // fcmp (fpext x), C -> fcmp x, (fptrunc C) if fptrunc is lossless.
4962 FPExtInst *LHSExt = cast<FPExtInst>(LHSI);
4963 ConstantFP *RHSF = dyn_cast<ConstantFP>(RHSC);
4967 const fltSemantics *Sem;
4968 // FIXME: This shouldn't be here.
4969 if (LHSExt->getSrcTy()->isHalfTy())
4970 Sem = &APFloat::IEEEhalf();
4971 else if (LHSExt->getSrcTy()->isFloatTy())
4972 Sem = &APFloat::IEEEsingle();
4973 else if (LHSExt->getSrcTy()->isDoubleTy())
4974 Sem = &APFloat::IEEEdouble();
4975 else if (LHSExt->getSrcTy()->isFP128Ty())
4976 Sem = &APFloat::IEEEquad();
4977 else if (LHSExt->getSrcTy()->isX86_FP80Ty())
4978 Sem = &APFloat::x87DoubleExtended();
4979 else if (LHSExt->getSrcTy()->isPPC_FP128Ty())
4980 Sem = &APFloat::PPCDoubleDouble();
4985 APFloat F = RHSF->getValueAPF();
4986 F.convert(*Sem, APFloat::rmNearestTiesToEven, &Lossy);
4988 // Avoid lossy conversions and denormals. Zero is a special case
4989 // that's OK to convert.
4993 ((Fabs.compare(APFloat::getSmallestNormalized(*Sem)) !=
4994 APFloat::cmpLessThan) || Fabs.isZero()))
4996 return new FCmpInst(Pred, LHSExt->getOperand(0),
4997 ConstantFP::get(RHSC->getContext(), F));
      case Instruction::PHI:
        // Only fold fcmp into the PHI if the phi and fcmp are in the same
        // block. If in the same block, we're encouraging jump threading. If
        // not, we are just pessimizing the code by making an i1 phi.
        if (LHSI->getParent() == I.getParent())
          if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
            return NV;
        break;
      case Instruction::SIToFP:
      case Instruction::UIToFP:
        if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
          return NV;
        break;
      case Instruction::FSub: {
        // fcmp pred (fneg x), C -> fcmp swap(pred) x, -C
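        // For example, 'fcmp olt (fsub -0.0, %x), 2.0' -> 'fcmp ogt %x, -2.0'.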
        Value *Op;
        if (match(LHSI, m_FNeg(m_Value(Op))))
          return new FCmpInst(I.getSwappedPredicate(), Op,
                              ConstantExpr::getFNeg(RHSC));
        break;
      }
      case Instruction::Load:
        if (GetElementPtrInst *GEP =
                dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
            if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
                !cast<LoadInst>(LHSI)->isVolatile())
              if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
                return Res;
        }
        break;
      case Instruction::Call: {
        if (!RHSC->isNullValue())
          break;

        CallInst *CI = cast<CallInst>(LHSI);
        Intrinsic::ID IID = getIntrinsicForCallSite(CI, &TLI);
        if (IID != Intrinsic::fabs)
          break;

        // Various optimizations for fabs compared with zero.
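        // Since fabs(x) is never negative, e.g. 'fabs(x) <= 0.0' can only be
        // true when fabs(x) == 0.0, which happens exactly for x == +/-0.0.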
        switch (Pred) {
        default:
          break;
        // fabs(x) < 0 --> false
        case FCmpInst::FCMP_OLT:
          llvm_unreachable("handled by SimplifyFCmpInst");
        // fabs(x) > 0 --> x != 0
        case FCmpInst::FCMP_OGT:
          return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0), RHSC);
        // fabs(x) <= 0 --> x == 0
        case FCmpInst::FCMP_OLE:
          return new FCmpInst(FCmpInst::FCMP_OEQ, CI->getArgOperand(0), RHSC);
        // fabs(x) >= 0 --> !isnan(x)
        case FCmpInst::FCMP_OGE:
          return new FCmpInst(FCmpInst::FCMP_ORD, CI->getArgOperand(0), RHSC);
        // fabs(x) == 0 --> x == 0
        // fabs(x) != 0 --> x != 0
        case FCmpInst::FCMP_OEQ:
        case FCmpInst::FCMP_UEQ:
        case FCmpInst::FCMP_ONE:
        case FCmpInst::FCMP_UNE:
          return new FCmpInst(Pred, CI->getArgOperand(0), RHSC);
        }
        break;
      }
      }
  }
  // fcmp pred (fneg x), (fneg y) -> fcmp swap(pred) x, y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return new FCmpInst(I.getSwappedPredicate(), X, Y);
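  // For example, 'fcmp ogt (fneg %x), (fneg %y)' -> 'fcmp olt %x, %y':
  // negation reverses the ordering and never introduces or removes a NaN.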
  // fcmp (fpext x), (fpext y) -> fcmp x, y
  if (FPExtInst *LHSExt = dyn_cast<FPExtInst>(Op0))
    if (FPExtInst *RHSExt = dyn_cast<FPExtInst>(Op1))
      if (LHSExt->getSrcTy() == RHSExt->getSrcTy())
        return new FCmpInst(Pred, LHSExt->getOperand(0),
                            RHSExt->getOperand(0));

  return Changed ? &I : nullptr;