//===- InstCombineCompares.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitICmp and visitFCmp functions.
//
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"
// How many times is a select replaced by one of its operands?
STATISTIC(NumSel, "Number of select opts");
static ConstantInt *extractElement(Constant *V, Constant *Idx) {
  return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
}
static bool hasAddOverflow(ConstantInt *Result,
                           ConstantInt *In1, ConstantInt *In2,
                           bool IsSigned) {
  if (!IsSigned)
    return Result->getValue().ult(In1->getValue());

  if (In2->isNegative())
    return Result->getValue().sgt(In1->getValue());
  return Result->getValue().slt(In1->getValue());
}
/// Compute Result = In1+In2, returning true if the result overflowed for this
/// type.
static bool addWithOverflow(Constant *&Result, Constant *In1,
                            Constant *In2, bool IsSigned = false) {
  Result = ConstantExpr::getAdd(In1, In2);

  if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
      Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
      if (hasAddOverflow(extractElement(Result, Idx),
                         extractElement(In1, Idx),
                         extractElement(In2, Idx),
                         IsSigned))
        return true;
    }
    return false;
  }

  return hasAddOverflow(cast<ConstantInt>(Result),
                        cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                        IsSigned);
}
static bool hasSubOverflow(ConstantInt *Result,
                           ConstantInt *In1, ConstantInt *In2,
                           bool IsSigned) {
  if (!IsSigned)
    return Result->getValue().ugt(In1->getValue());

  if (In2->isNegative())
    return Result->getValue().slt(In1->getValue());

  return Result->getValue().sgt(In1->getValue());
}

/// Compute Result = In1-In2, returning true if the result overflowed for this
/// type.
static bool subWithOverflow(Constant *&Result, Constant *In1,
                            Constant *In2, bool IsSigned = false) {
  Result = ConstantExpr::getSub(In1, In2);

  if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
      Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
      if (hasSubOverflow(extractElement(Result, Idx),
                         extractElement(In1, Idx),
                         extractElement(In2, Idx),
                         IsSigned))
        return true;
    }
    return false;
  }

  return hasSubOverflow(cast<ConstantInt>(Result),
                        cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                        IsSigned);
}
/// Given an icmp instruction, return true if any use of this comparison is a
/// branch on sign bit comparison.
static bool isBranchOnSignBitCheck(ICmpInst &I, bool isSignBit) {
  for (auto *U : I.users())
    if (isa<BranchInst>(U))
      return isSignBit;
  return false;
}
/// Given an exploded icmp instruction, return true if the comparison only
/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if the
/// result of the comparison is true when the input value is signed.
static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                           bool &TrueIfSigned) {
  switch (Pred) {
  case ICmpInst::ICMP_SLT:   // True if LHS s< 0
    TrueIfSigned = true;
    return RHS == 0;
  case ICmpInst::ICMP_SLE:   // True if LHS s<= RHS and RHS == -1
    TrueIfSigned = true;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_SGT:   // True if LHS s> -1
    TrueIfSigned = false;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_UGT:
    // True if LHS u> RHS and RHS == high-bit-mask - 1
    TrueIfSigned = true;
    return RHS.isMaxSignedValue();
  case ICmpInst::ICMP_UGE:
    // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
    TrueIfSigned = true;
    return RHS.isSignMask();
  default:
    return false;
  }
}
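// Illustrative examples: on i8 values,
//   icmp slt %x, 0   and   icmp ugt %x, 127
// are both sign-bit checks with TrueIfSigned == true, while
//   icmp sgt %x, -1
// is a sign-bit check with TrueIfSigned == false.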
/// Returns true if the exploded icmp can be expressed as a signed comparison
/// to zero and updates the predicate accordingly.
/// The signedness of the comparison is preserved.
/// TODO: Refactor with decomposeBitTestICmp()?
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
  if (!ICmpInst::isSigned(Pred))
    return false;

  if (C == 0)
    return ICmpInst::isRelational(Pred);

  if (C == 1) {
    if (Pred == ICmpInst::ICMP_SLT) {
      Pred = ICmpInst::ICMP_SLE;
      return true;
    }
  } else if (C.isAllOnesValue()) {
    if (Pred == ICmpInst::ICMP_SGT) {
      Pred = ICmpInst::ICMP_SGE;
      return true;
    }
  }

  return false;
}
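// Illustrative example: "icmp slt %x, 1" is the sign test "icmp sle %x, 0",
// and "icmp sgt %x, -1" is "icmp sge %x, 0"; both keep the signed predicate
// and compare against zero.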
/// Given a signed integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeSignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                   APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  // bit if it is unknown.
  Min = Known.One;
  Max = Known.One|UnknownBits;

  if (UnknownBits.isNegative()) { // Sign bit is unknown
    Min.setSignBit();
    Max.clearSignBit();
  }
}
/// Given an unsigned integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeUnsignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                     APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when the unknown bits are all zeros.
  Min = Known.One;
  // The maximum value is when the unknown bits are all ones.
  Max = Known.One|UnknownBits;
}
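// Worked example (illustrative): for an i8 with KnownZero = 0b00000001 and
// KnownOne = 0b10000000, the unknown bits are 0b01111110, so the unsigned
// Min is 0b10000000 (128) and the unsigned Max is 0b11111110 (254).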
/// This is called when we see this pattern:
///   cmp pred (load (gep GV, ...)), cmpcst
/// where GV is a global variable with a constant initializer. Try to simplify
/// this into some simple computation that does not need the load. For example
/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
///
/// If AndCst is non-null, then the loaded value is masked with that constant
/// before doing the comparison. This handles cases like "A[i]&4 == 0".
Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                                        GlobalVariable *GV,
                                                        CmpInst &ICI,
                                                        ConstantInt *AndCst) {
  Constant *Init = GV->getInitializer();
  if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
    return nullptr;

  uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
  // Don't blow up on huge arrays.
  if (ArrayElementCount > MaxArraySizeForCombine)
    return nullptr;

  // There are many forms of this optimization we can handle, for now, just do
  // the simple index into a single-dimensional array.
  //
  // Require: GEP GV, 0, i {{, constant indices}}
  if (GEP->getNumOperands() < 3 ||
      !isa<ConstantInt>(GEP->getOperand(1)) ||
      !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
      isa<Constant>(GEP->getOperand(2)))
    return nullptr;

  // Check that indices after the variable are constants and in-range for the
  // type they index. Collect the indices. This is typically for arrays of
  // structs.
  SmallVector<unsigned, 4> LaterIndices;

  Type *EltTy = Init->getType()->getArrayElementType();
  for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
    ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Idx) return nullptr;  // Variable index.

    uint64_t IdxVal = Idx->getZExtValue();
    if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.

    if (StructType *STy = dyn_cast<StructType>(EltTy))
      EltTy = STy->getElementType(IdxVal);
    else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
      if (IdxVal >= ATy->getNumElements()) return nullptr;
      EltTy = ATy->getElementType();
    } else {
      return nullptr; // Unknown type.
    }

    LaterIndices.push_back(IdxVal);
  }
  enum { Overdefined = -3, Undefined = -2 };

  // Variables for our state machines.

  // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
  // "i == 47 | i == 87", where 47 is the first index the condition is true for,
  // and 87 is the second (and last) index. FirstTrueElement is -2 when
  // undefined, otherwise set to the first true element. SecondTrueElement is
  // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
  int FirstTrueElement = Undefined, SecondTrueElement = Undefined;

  // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
  // form "i != 47 & i != 87". Same state transitions as for true elements.
  int FirstFalseElement = Undefined, SecondFalseElement = Undefined;

  /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
  /// define a state machine that triggers for ranges of values that the index
  /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
  /// This is -2 when undefined, -3 when overdefined, and otherwise the last
  /// index in the range (inclusive). We use -2 for undefined here because we
  /// use relative comparisons and don't want 0-1 to match -1.
  int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;

  // MagicBitvector - This is a magic bitvector where we set a bit if the
  // comparison is true for element 'i'. If there are 64 elements or less in
  // the array, this will fully represent all the comparison results.
  uint64_t MagicBitvector = 0;

  // Scan the array and see if one of our patterns matches.
  Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
  for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
    Constant *Elt = Init->getAggregateElement(i);
    if (!Elt) return nullptr;

    // If this is indexing an array of structures, get the structure element.
    if (!LaterIndices.empty())
      Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);

    // If the element is masked, handle it.
    if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);

    // Find out if the comparison would be true or false for the i'th element.
    Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
                                                  CompareRHS, DL, &TLI);
    // If the result is undef for this element, ignore it.
    if (isa<UndefValue>(C)) {
      // Extend range state machines to cover this element in case there is an
      // undef in the middle of the range.
      if (TrueRangeEnd == (int)i-1)
        TrueRangeEnd = i;
      if (FalseRangeEnd == (int)i-1)
        FalseRangeEnd = i;
      continue;
    }

    // If we can't compute the result for any of the elements, we have to give
    // up evaluating the entire conditional.
    if (!isa<ConstantInt>(C)) return nullptr;

    // Otherwise, we know if the comparison is true or false for this element,
    // update our state machines.
    bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();

    // State machine for single/double/range index comparison.
    if (IsTrueForElt) {
      // Update the TrueElement state machine.
      if (FirstTrueElement == Undefined)
        FirstTrueElement = TrueRangeEnd = i; // First true element.
      else {
        // Update double-compare state machine.
        if (SecondTrueElement == Undefined)
          SecondTrueElement = i;
        else
          SecondTrueElement = Overdefined;

        // Update range state machine.
        if (TrueRangeEnd == (int)i-1)
          TrueRangeEnd = i;
        else
          TrueRangeEnd = Overdefined;
      }
    } else {
      // Update the FalseElement state machine.
      if (FirstFalseElement == Undefined)
        FirstFalseElement = FalseRangeEnd = i; // First false element.
      else {
        // Update double-compare state machine.
        if (SecondFalseElement == Undefined)
          SecondFalseElement = i;
        else
          SecondFalseElement = Overdefined;

        // Update range state machine.
        if (FalseRangeEnd == (int)i-1)
          FalseRangeEnd = i;
        else
          FalseRangeEnd = Overdefined;
      }
    }

    // If this element is in range, update our magic bitvector.
    if (i < 64 && IsTrueForElt)
      MagicBitvector |= 1ULL << i;

    // If all of our states become overdefined, bail out early. Since the
    // predicate is expensive, only check it every 8 elements. This is only
    // really useful for really huge arrays.
    if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
        SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
        FalseRangeEnd == Overdefined)
      return nullptr;
  }
  // Now that we've scanned the entire array, emit our new comparison(s). We
  // order the state machines in complexity of the generated code.
  Value *Idx = GEP->getOperand(2);

  // If the index is larger than the pointer size of the target, truncate the
  // index down like the GEP would do implicitly. We don't have to do this for
  // an inbounds GEP because the index can't be out of range.
  if (!GEP->isInBounds()) {
    Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
    unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
    if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
      Idx = Builder->CreateTrunc(Idx, IntPtrTy);
  }

  // If the comparison is only true for one or two elements, emit direct
  // comparisons.
  if (SecondTrueElement != Overdefined) {
    // None true -> false.
    if (FirstTrueElement == Undefined)
      return replaceInstUsesWith(ICI, Builder->getFalse());

    Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);

    // True for one element -> 'i == 47'.
    if (SecondTrueElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);

    // True for two elements -> 'i == 47 | i == 72'.
    Value *C1 = Builder->CreateICmpEQ(Idx, FirstTrueIdx);
    Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
    Value *C2 = Builder->CreateICmpEQ(Idx, SecondTrueIdx);
    return BinaryOperator::CreateOr(C1, C2);
  }

  // If the comparison is only false for one or two elements, emit direct
  // comparisons.
  if (SecondFalseElement != Overdefined) {
    // None false -> true.
    if (FirstFalseElement == Undefined)
      return replaceInstUsesWith(ICI, Builder->getTrue());

    Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);

    // False for one element -> 'i != 47'.
    if (SecondFalseElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);

    // False for two elements -> 'i != 47 & i != 72'.
    Value *C1 = Builder->CreateICmpNE(Idx, FirstFalseIdx);
    Value *SecondFalseIdx = ConstantInt::get(Idx->getType(), SecondFalseElement);
    Value *C2 = Builder->CreateICmpNE(Idx, SecondFalseIdx);
    return BinaryOperator::CreateAnd(C1, C2);
  }

  // If the comparison can be replaced with a range comparison for the elements
  // where it is true, emit the range check.
  if (TrueRangeEnd != Overdefined) {
    assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");

    // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
    if (FirstTrueElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
      Idx = Builder->CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  TrueRangeEnd-FirstTrueElement+1);
    return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
  }

  // False range check.
  if (FalseRangeEnd != Overdefined) {
    assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
    // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
    if (FirstFalseElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
      Idx = Builder->CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  FalseRangeEnd-FirstFalseElement);
    return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
  }

  // If a magic bitvector captures the entire comparison state
  // of this load, replace it with computation that does:
  //   ((magic_cst >> i) & 1) != 0
  {
    Type *Ty = nullptr;

    // Look for an appropriate type:
    // - The type of Idx if the magic fits
    // - The smallest fitting legal type if we have a DataLayout
    // - Default to i32
    if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
      Ty = Idx->getType();
    else
      Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);

    if (Ty) {
      Value *V = Builder->CreateIntCast(Idx, Ty, false);
      V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
      V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V);
      return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
    }
  }

  return nullptr;
}
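// Illustrative example: for a constant table
//   @foo = constant [4 x i32] [i32 0, i32 5, i32 0, i32 9]
// the compare "icmp ne (load (gep @foo, 0, %i)), 0" is true only for indices
// 1 and 3, so it can be rewritten without the load as
//   %a = icmp eq i32 %i, 1
//   %b = icmp eq i32 %i, 3
//   %r = or i1 %a, %b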
/// Return a value that can be used to compare the *offset* implied by a GEP to
/// zero. For example, if we have &A[i], we want to return 'i' for
/// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
/// are involved. The above expression would also be legal to codegen as
/// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This latter form is less amenable to optimization though, and we are allowed
/// to generate the first by knowing that pointer arithmetic doesn't overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
                                          const DataLayout &DL) {
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index. If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index. For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return nullptr;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element. For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices. If so, emit the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return nullptr;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index. If there is no offset, life is simple, return
  // the index.
  Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
  unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
  if (Offset == 0) {
    // Cast to intptrty in case a truncation occurs. If an extension is needed,
    // we don't need to bother extending: the extension won't affect where the
    // computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
      VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy);
    }
    return VariableIdx;
  }

  // Otherwise, there is an index. The computation we will do will be modulo
  // the pointer size, so get it.
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  Offset &= PtrSizeMask;
  VariableScale &= PtrSizeMask;

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return nullptr;

  // Okay, we can do this evaluation. Start by converting the index to intptr.
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy,
                                            true /*Signed*/);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return IC.Builder->CreateAdd(VariableIdx, OffsetVal, "offset");
}
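// Illustrative example: a GEP implying the offset "12 + i*4" (e.g. &A[3+i]
// with i32 elements) crosses zero exactly where "3 + i" does, so comparing
// the GEP against its base pointer can be emitted as a compare of
// "add i, 3" against zero, avoiding the scaled multiply.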
/// Returns true if we can rewrite Start as a GEP with pointer Base
/// and some integer offset. The nodes that need to be re-written
/// for this transformation will be added to Explored.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
                                  const DataLayout &DL,
                                  SetVector<Value *> &Explored) {
  SmallVector<Value *, 16> WorkList(1, Start);
  Explored.insert(Base);

  // The following traversal gives us an order which can be used
  // when doing the final transformation. Since in the final
  // transformation we create the PHI replacement instructions first,
  // we don't have to get them in any particular order.
  //
  // However, for other instructions we will have to traverse the
  // operands of an instruction first, which means that we have to
  // do a post-order traversal.
  while (!WorkList.empty()) {
    SetVector<PHINode *> PHIs;

    while (!WorkList.empty()) {
      if (Explored.size() >= 100)
        return false;

      Value *V = WorkList.back();

      if (Explored.count(V) != 0) {
        WorkList.pop_back();
        continue;
      }

      if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
          !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
        // We've found some value that we can't explore which is different from
        // the base. Therefore we can't do this transformation.
        return false;

      if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
        auto *CI = dyn_cast<CastInst>(V);
        if (!CI->isNoopCast(DL))
          return false;

        if (Explored.count(CI->getOperand(0)) == 0)
          WorkList.push_back(CI->getOperand(0));
      }

      if (auto *GEP = dyn_cast<GEPOperator>(V)) {
        // We're limiting the GEP to having one index. This will preserve
        // the original pointer type. We could handle more cases in the
        // future.
        if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
            GEP->getType() != Start->getType())
          return false;

        if (Explored.count(GEP->getOperand(0)) == 0)
          WorkList.push_back(GEP->getOperand(0));
      }

      if (WorkList.back() == V) {
        WorkList.pop_back();
        // We've finished visiting this node, mark it as such.
        Explored.insert(V);
      }

      if (auto *PN = dyn_cast<PHINode>(V)) {
        // We cannot transform PHIs on unsplittable basic blocks.
        if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
          return false;
        Explored.insert(PN);
        PHIs.insert(PN);
      }
    }

    // Explore the PHI nodes further.
    for (auto *PN : PHIs)
      for (Value *Op : PN->incoming_values())
        if (Explored.count(Op) == 0)
          WorkList.push_back(Op);
  }

  // Make sure that we can do this. Since we can't insert GEPs in a basic
  // block before a PHI node, we can't easily do this transformation if
  // we have PHI node users of transformed instructions.
  for (Value *Val : Explored) {
    for (Value *Use : Val->uses()) {

      auto *PHI = dyn_cast<PHINode>(Use);
      auto *Inst = dyn_cast<Instruction>(Val);

      if (Inst == Base || Inst == PHI || !Inst || !PHI ||
          Explored.count(PHI) == 0)
        continue;

      if (PHI->getParent() == Inst->getParent())
        return false;
    }
  }
  return true;
}
// Sets the appropriate insert point on Builder where we can add
// a replacement Instruction for V (if that is possible).
static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
                              bool Before = true) {
  if (auto *PHI = dyn_cast<PHINode>(V)) {
    Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
    return;
  }
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (!Before)
      I = &*std::next(I->getIterator());
    Builder.SetInsertPoint(I);
    return;
  }
  if (auto *A = dyn_cast<Argument>(V)) {
    // Set the insertion point in the entry block.
    BasicBlock &Entry = A->getParent()->getEntryBlock();
    Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
    return;
  }
  // Otherwise, this is a constant and we don't need to set a new
  // insertion point.
  assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
}
/// Returns a re-written value of Start as an indexed GEP using Base as a
/// pointer.
static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
                                 const DataLayout &DL,
                                 SetVector<Value *> &Explored) {
  // Perform all the substitutions. This is a bit tricky because we can
  // have cycles in our use-def chains.
  // 1. Create the PHI nodes without any incoming values.
  // 2. Create all the other values.
  // 3. Add the edges for the PHI nodes.
  // 4. Emit GEPs to get the original pointers.
  // 5. Remove the original instructions.
  Type *IndexType = IntegerType::get(
      Base->getContext(), DL.getPointerTypeSizeInBits(Start->getType()));

  DenseMap<Value *, Value *> NewInsts;
  NewInsts[Base] = ConstantInt::getNullValue(IndexType);

  // Create the new PHI nodes, without adding any incoming values.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // Create empty phi nodes. This avoids cyclic dependencies when creating
    // the remaining instructions.
    if (auto *PHI = dyn_cast<PHINode>(Val))
      NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
                                      PHI->getName() + ".idx", PHI);
  }
  IRBuilder<> Builder(Base->getContext());

  // Create all the other instructions.
  for (Value *Val : Explored) {

    if (NewInsts.find(Val) != NewInsts.end())
      continue;

    if (auto *CI = dyn_cast<CastInst>(Val)) {
      NewInsts[CI] = NewInsts[CI->getOperand(0)];
      continue;
    }
    if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
      Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
                                                  : GEP->getOperand(1);
      setInsertionPoint(Builder, GEP);
      // Indices might need to be sign extended. GEPs will magically do
      // this, but we need to do it ourselves here.
      if (Index->getType()->getScalarSizeInBits() !=
          NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
        Index = Builder.CreateSExtOrTrunc(
            Index, NewInsts[GEP->getOperand(0)]->getType(),
            GEP->getOperand(0)->getName() + ".sext");
      }

      auto *Op = NewInsts[GEP->getOperand(0)];
      if (isa<ConstantInt>(Op) && dyn_cast<ConstantInt>(Op)->isZero())
        NewInsts[GEP] = Index;
      else
        NewInsts[GEP] = Builder.CreateNSWAdd(
            Op, Index, GEP->getOperand(0)->getName() + ".add");
      continue;
    }
    if (isa<PHINode>(Val))
      continue;

    llvm_unreachable("Unexpected instruction type");
  }

  // Add the incoming values to the PHI nodes.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // All the instructions have been created, we can now add edges to the
    // phi nodes.
    if (auto *PHI = dyn_cast<PHINode>(Val)) {
      PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
      for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
        Value *NewIncoming = PHI->getIncomingValue(I);

        if (NewInsts.find(NewIncoming) != NewInsts.end())
          NewIncoming = NewInsts[NewIncoming];

        NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
      }
    }
  }

  for (Value *Val : Explored) {
    if (Val == Base)
      continue;

    // Depending on the type, for external users we have to emit
    // a GEP or a GEP + ptrtoint.
    setInsertionPoint(Builder, Val, false);

    // If required, create an inttoptr instruction for Base.
    Value *NewBase = Base;
    if (!Base->getType()->isPointerTy())
      NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
                                               Start->getName() + "to.ptr");

    Value *GEP = Builder.CreateInBoundsGEP(
        Start->getType()->getPointerElementType(), NewBase,
        makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");

    if (!Val->getType()->isPointerTy()) {
      Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
                                              Val->getName() + ".conv");
      GEP = Cast;
    }
    Val->replaceAllUsesWith(GEP);
  }

  return NewInsts[Start];
}
/// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
/// the input Value as a constant indexed GEP. Returns a pair containing
/// the GEPs Pointer and Index.
static std::pair<Value *, Value *>
getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
  Type *IndexType = IntegerType::get(V->getContext(),
                                     DL.getPointerTypeSizeInBits(V->getType()));

  Constant *Index = ConstantInt::getNullValue(IndexType);
  while (true) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      // We accept only inbounds GEPs here to exclude the possibility of
      // overflow.
      if (!GEP->isInBounds())
        break;
      if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
          GEP->getType() == V->getType()) {
        V = GEP->getOperand(0);
        Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
        Index = ConstantExpr::getAdd(
            Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType));
        continue;
      }
      break;
    }
    if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    break;
  }
  return {V, Index};
}
/// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
/// We can look through PHIs, GEPs and casts in order to determine a common base
/// between GEPLHS and RHS.
static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
                                              ICmpInst::Predicate Cond,
                                              const DataLayout &DL) {
  if (!GEPLHS->hasAllConstantIndices())
    return nullptr;

  // Make sure the pointers have the same type.
  if (GEPLHS->getType() != RHS->getType())
    return nullptr;

  Value *PtrBase, *Index;
  std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);

  // The set of nodes that will take part in this transformation.
  SetVector<Value *> Nodes;

  if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
    return nullptr;

  // We know we can re-write this as
  //   ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2))
  // Since we've only looked through inbounds GEPs we know that we
  // can't have overflow on either side. We can therefore re-write
  // this as:
  //   OFFSET1 cmp OFFSET2
  Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);

  // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
  // GEP having PtrBase as the pointer base, and has returned in NewRHS the
  // offset. Since Index is the offset of LHS to the base pointer, we will now
  // compare the offsets instead of comparing the pointers.
  return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
}
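// Illustrative example: if GEPLHS is "gep inbounds %base, 4" and RHS is a PHI
// over "gep inbounds %base, ..." values, both sides are expressed as integer
// offsets from %base, and the pointer comparison becomes a comparison of the
// two offsets.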
/// Fold comparisons between a GEP instruction and something else. At this point
/// we know that the GEP is on the LHS of the comparison.
Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                       ICmpInst::Predicate Cond,
                                       Instruction &I) {
  // Don't transform signed compares of GEPs into index compares. Even if the
  // GEP is inbounds, the final add of the base pointer can have signed overflow
  // and would change the result of the icmp.
  // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
  // the maximum signed value for the pointer type.
  if (ICmpInst::isSigned(Cond))
    return nullptr;

  // Look through bitcasts and addrspacecasts. We do not however want to remove
  // 0 GEPs.
  if (!isa<GetElementPtrInst>(RHS))
    RHS = RHS->stripPointerCasts();

  Value *PtrBase = GEPLHS->getOperand(0);
  if (PtrBase == RHS && GEPLHS->isInBounds()) {
    // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow since the gep is inbounds. See if we can
    // output an optimized form.
    Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);

    // If not, synthesize the offset the hard way.
    if (!Offset)
      Offset = EmitGEPOffset(GEPLHS);
    return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      if (IndicesTheSame)
        return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // If we're comparing GEPs with two base pointers that only differ in type
      // and both GEPs have only constant indices or just one use, then fold
      // the compare with the adjusted indices.
      if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
          (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
          (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
          PtrBase->stripPointerCasts() ==
              GEPRHS->getOperand(0)->stripPointerCasts()) {
        Value *LOffset = EmitGEPOffset(GEPLHS);
        Value *ROffset = EmitGEPOffset(GEPRHS);

        // If we looked through an addrspacecast between different sized address
        // spaces, the LHS and RHS pointers are different sized
        // integers. Truncate to the smaller one.
        Type *LHSIndexTy = LOffset->getType();
        Type *RHSIndexTy = ROffset->getType();
        if (LHSIndexTy != RHSIndexTy) {
          if (LHSIndexTy->getPrimitiveSizeInBits() <
              RHSIndexTy->getPrimitiveSizeInBits()) {
            ROffset = Builder->CreateTrunc(ROffset, LHSIndexTy);
          } else
            LOffset = Builder->CreateTrunc(LOffset, RHSIndexTy);
        }

        Value *Cmp = Builder->CreateICmp(ICmpInst::getSignedPredicate(Cond),
                                         LOffset, ROffset);
        return replaceInstUsesWith(I, Cmp);
      }

      // Otherwise, the base pointers are different and the indices are
      // different. Try convert this to an indexed compare by looking through
      // PHIs/casts.
      return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
    }
    // If one of the GEPs has all zero indices, recurse.
    if (GEPLHS->hasAllZeroIndices())
      return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                         ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    if (GEPRHS->hasAllZeroIndices())
      return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0;  // Keep track of # differences.
      unsigned DiffOperand = 0;     // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
              GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          } else {
            if (NumDifferences++) break;
            DiffOperand = i;
          }
        }

      if (NumDifferences == 0)   // SAME GEP?
        return replaceInstUsesWith(I, // No comparison is needed here.
            Builder->getInt1(ICmpInst::isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1 && GEPsInBounds) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)  --->  (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS);
      Value *R = EmitGEPOffset(GEPRHS);
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }

  // Try convert this to an indexed compare by looking through PHIs/casts as a
  // last resort.
  return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
}
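// Illustrative example: with an inbounds GEP,
//   icmp eq (gep inbounds i32, i32* %p, i64 %i), %p
// folds to "icmp eq i64 %i, 0", since an inbounds offset cannot wrap the
// pointer back onto itself.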
Instruction *InstCombiner::foldAllocaCmp(ICmpInst &ICI,
                                         const AllocaInst *Alloca,
                                         const Value *Other) {
  assert(ICI.isEquality() && "Cannot fold non-equality comparison.");

  // It would be tempting to fold away comparisons between allocas and any
  // pointer not based on that alloca (e.g. an argument). However, even
  // though such pointers cannot alias, they can still compare equal.
  //
  // But LLVM doesn't specify where allocas get their memory, so if the alloca
  // doesn't escape we can argue that it's impossible to guess its value, and we
  // can therefore act as if any such guesses are wrong.
  //
  // The code below checks that the alloca doesn't escape, and that it's only
  // used in a comparison once (the current instruction). The
  // single-comparison-use condition ensures that we're trivially folding all
  // comparisons against the alloca consistently, and avoids the risk of
  // erroneously folding a comparison of the pointer with itself.

  unsigned MaxIter = 32; // Break cycles and bound to constant-time.

  SmallVector<const Use *, 32> Worklist;
  for (const Use &U : Alloca->uses()) {
    if (Worklist.size() >= MaxIter)
      return nullptr;
    Worklist.push_back(&U);
  }

  unsigned NumCmps = 0;
  while (!Worklist.empty()) {
    assert(Worklist.size() <= MaxIter);
    const Use *U = Worklist.pop_back_val();
    const Value *V = U->getUser();
    --MaxIter;

    if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
        isa<SelectInst>(V)) {
      // Track the uses.
    } else if (isa<LoadInst>(V)) {
      // Loading from the pointer doesn't escape it.
      continue;
    } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
      // Storing *to* the pointer is fine, but storing the pointer escapes it.
      if (SI->getValueOperand() == U->get())
        return nullptr;
      continue;
    } else if (isa<ICmpInst>(V)) {
      if (NumCmps++)
        return nullptr; // Found more than one cmp.
      continue;
    } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
      switch (Intrin->getIntrinsicID()) {
      // These intrinsics don't escape or compare the pointer. Memset is safe
      // because we don't allow ptrtoint. Memcpy and memmove are safe because
      // we don't allow stores, so src cannot point to V.
      case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
      case Intrinsic::dbg_declare: case Intrinsic::dbg_value:
      case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
        continue;
      default:
        return nullptr;
      }
    } else {
      return nullptr;
    }
    for (const Use &U : V->uses()) {
      if (Worklist.size() >= MaxIter)
        return nullptr;
      Worklist.push_back(&U);
    }
  }

  Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
  return replaceInstUsesWith(
      ICI,
      ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
}
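// Illustrative example: if "%a = alloca i32" never escapes, then
// "icmp eq i32* %a, %other" can be folded to false; because the alloca's
// address is unobservable, the compiler may act as if any guess of it is
// wrong.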
1133 /// Fold "icmp pred (X+CI), X".
1134 Instruction *InstCombiner::foldICmpAddOpConst(Instruction &ICI,
1135 Value *X, ConstantInt *CI,
1136 ICmpInst::Predicate Pred) {
1137 // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
1138 // so the values can never be equal. Similarly for all other "or equals"
1141 // (X+1) <u X --> X >u (MAXUINT-1) --> X == 255
1142 // (X+2) <u X --> X >u (MAXUINT-2) --> X > 253
1143 // (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0
1144 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
1146 ConstantExpr::getSub(ConstantInt::getAllOnesValue(CI->getType()), CI);
1147 return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
1150 // (X+1) >u X --> X <u (0-1) --> X != 255
1151 // (X+2) >u X --> X <u (0-2) --> X <u 254
1152 // (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0
1153 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
1154 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));
1156 unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
1157 ConstantInt *SMax = ConstantInt::get(X->getContext(),
1158 APInt::getSignedMaxValue(BitWidth));
1160 // (X+ 1) <s X --> X >s (MAXSINT-1) --> X == 127
1161 // (X+ 2) <s X --> X >s (MAXSINT-2) --> X >s 125
1162 // (X+MAXSINT) <s X --> X >s (MAXSINT-MAXSINT) --> X >s 0
1163 // (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1
1164 // (X+ -2) <s X --> X >s (MAXSINT- -2) --> X >s 126
1165 // (X+ -1) <s X --> X >s (MAXSINT- -1) --> X != 127
1166 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
1167 return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI));
1169 // (X+ 1) >s X --> X <s (MAXSINT-(1-1)) --> X != 127
1170 // (X+ 2) >s X --> X <s (MAXSINT-(2-1)) --> X <s 126
1171 // (X+MAXSINT) >s X --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
1172 // (X+MINSINT) >s X --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
1173 // (X+ -2) >s X --> X <s (MAXSINT-(-2-1)) --> X <s -126
1174 // (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128
1176 assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
1177 Constant *C = Builder->getInt(CI->getValue()-1);
1178 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
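// Worked example (illustrative): for i8,
//   (X+1) <u X  -->  X >u 254  -->  X == 255
// because the only way the unsigned add can produce a value below X is when
// X+1 wraps, i.e. X is the maximum unsigned value.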
/// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
/// (icmp eq/ne A, Log2(AP2/AP1)) ->
/// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
Instruction *InstCombiner::foldICmpShrConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2 == 0)
    return nullptr;

  bool IsAShr = isa<AShrOperator>(I.getOperand(0));
  if (IsAShr) {
    if (AP2.isAllOnesValue())
      return nullptr;
    if (AP2.isNegative() != AP1.isNegative())
      return nullptr;
    if (AP2.sgt(AP1))
      return nullptr;
  }

  if (!AP1)
    // 'A' must be large enough to shift out the highest set bit.
    return getICmp(I.ICMP_UGT, A,
                   ConstantInt::get(A->getType(), AP2.logBase2()));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  int Shift;
  if (IsAShr && AP1.isNegative())
    Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
  else
    Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();

  if (Shift > 0) {
    if (IsAShr && AP1 == AP2.ashr(Shift)) {
      // There are multiple solutions if we are comparing against -1 and the LHS
      // of the ashr is not a power of two.
      if (AP1.isAllOnesValue() && !AP2.isPowerOf2())
        return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    } else if (AP1 == AP2.lshr(Shift)) {
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    }
  }

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}
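// Worked example (illustrative): for
//   icmp eq i8 (lshr i8 64, %a), 8
// the shift distance is fixed by the leading-zero counts (4 - 1 = 3) and
// 64 lshr 3 == 8, so the compare folds to "icmp eq i8 %a, 3".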
/// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
/// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Instruction *InstCombiner::foldICmpShlConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2 == 0)
    return nullptr;

  unsigned AP2TrailingZeros = AP2.countTrailingZeros();

  if (!AP1 && AP2TrailingZeros != 0)
    return getICmp(
        I.ICMP_UGE, A,
        ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  // Get the distance between the lowest bits that are set.
  int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;

  if (Shift > 0 && AP2.shl(Shift) == AP1)
    return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}
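// Worked example (illustrative): for
//   icmp eq i8 (shl i8 2, %a), 16
// the trailing-zero distance is 4 - 1 = 3 and 2 shl 3 == 16, so the compare
// folds to "icmp eq i8 %a, 3".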
/// The caller has matched a pattern of the form:
///   I = icmp ugt (add (add A, B), CI2), CI1
/// If this is of the form:
///   sum = a + b
///   if (sum+128 >u 255)
/// Then replace it with llvm.sadd.with.overflow.i8.
///
static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
                                          ConstantInt *CI2, ConstantInt *CI1,
                                          InstCombiner &IC) {
  // The transformation we're trying to do here is to transform this into an
  // llvm.sadd.with.overflow. To do this, we have to replace the original add
  // with a narrower add, and discard the add-with-constant that is part of the
  // range check (if we can't eliminate it, this isn't profitable).

  // In order to eliminate the add-with-constant, the compare can be its only
  // use.
  Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
  if (!AddWithCst->hasOneUse())
    return nullptr;

  // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
  if (!CI2->getValue().isPowerOf2())
    return nullptr;
  unsigned NewWidth = CI2->getValue().countTrailingZeros();
  if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
    return nullptr;

  // The width of the new add formed is 1 more than the bias.
  ++NewWidth;

  // Check to see that CI1 is an all-ones value with NewWidth bits.
  if (CI1->getBitWidth() == NewWidth ||
      CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
    return nullptr;

  // This is only really a signed overflow check if the inputs have been
  // sign-extended; check for that condition. For example, if CI2 is 2^31 and
  // the operands of the add are 64 bits wide, we need at least 33 sign bits.
  unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
  if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
      IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
    return nullptr;

  // In order to replace the original add with a narrower
  // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
  // and truncates that discard the high bits of the add. Verify that this is
  // the case.
  Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
  for (User *U : OrigAdd->users()) {
    if (U == AddWithCst)
      continue;

    // Only accept truncates for now. We would really like a nice recursive
    // predicate like SimplifyDemandedBits, but which goes downwards the use-def
    // chain to see which bits of a value are actually demanded. If the
    // original add had another add which was then immediately truncated, we
    // could still do the transformation.
    TruncInst *TI = dyn_cast<TruncInst>(U);
    if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
      return nullptr;
  }

  // If the pattern matches, truncate the inputs to the narrower type and
  // use the sadd_with_overflow intrinsic to efficiently compute both the
  // result and the overflow bit.
  Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
  Value *F = Intrinsic::getDeclaration(I.getModule(),
                                       Intrinsic::sadd_with_overflow, NewType);

  InstCombiner::BuilderTy *Builder = IC.Builder;

  // Put the new code above the original add, in case there are any uses of the
  // add between the add and the compare.
  Builder->SetInsertPoint(OrigAdd);

  Value *TruncA = Builder->CreateTrunc(A, NewType, A->getName() + ".trunc");
  Value *TruncB = Builder->CreateTrunc(B, NewType, B->getName() + ".trunc");
  CallInst *Call = Builder->CreateCall(F, {TruncA, TruncB}, "sadd");
  Value *Add = Builder->CreateExtractValue(Call, 0, "sadd.result");
  Value *ZExt = Builder->CreateZExt(Add, OrigAdd->getType());

  // The inner add was the result of the narrow add, zero extended to the
  // wider type. Replace it with the result computed by the intrinsic.
  IC.replaceInstUsesWith(*OrigAdd, ZExt);

  // The original icmp gets replaced with the overflow value.
  return ExtractValueInst::Create(Call, 1, "sadd.overflow");
}
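// Illustrative example: the C idiom
//   int sum = a + b;                     // a, b sign-extended from 8 bits
//   if ((unsigned)(sum + 128) > 255U) ...overflow...
// matches this pattern with CI2 == 128 and CI1 == 255, and becomes a call to
// @llvm.sadd.with.overflow.i8 whose i1 result feeds the branch.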
// Fold icmp Pred X, C.
Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
  CmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Cmp.getOperand(0);

  const APInt *C;
  if (!match(Cmp.getOperand(1), m_APInt(C)))
    return nullptr;

  Value *A = nullptr, *B = nullptr;

  // Match the following pattern, which is a common idiom when writing
  // overflow-safe integer arithmetic functions. The source performs an addition
  // in wider type and explicitly checks for overflow using comparisons against
  // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
  //
  // TODO: This could probably be generalized to handle other overflow-safe
  // operations if we worked out the formulas to compute the appropriate magic
  // constants.
  //
  // sum = a + b
  // if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
  {
    ConstantInt *CI2; // I = icmp ugt (add (add A, B), CI2), CI
    if (Pred == ICmpInst::ICMP_UGT &&
        match(X, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
      if (Instruction *Res = processUGT_ADDCST_ADD(
              Cmp, A, B, CI2, cast<ConstantInt>(Cmp.getOperand(1)), *this))
        return Res;
  }

  // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0)
  if (*C == 0 && Pred == ICmpInst::ICMP_SGT) {
    SelectPatternResult SPR = matchSelectPattern(X, A, B);
    if (SPR.Flavor == SPF_SMIN) {
      if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT))
        return new ICmpInst(Pred, B, Cmp.getOperand(1));
      if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT))
        return new ICmpInst(Pred, A, Cmp.getOperand(1));
    }
  }

  // FIXME: Use m_APInt to allow folds for splat constants.
  ConstantInt *CI = dyn_cast<ConstantInt>(Cmp.getOperand(1));
  if (!CI)
    return nullptr;

  // Canonicalize icmp instructions based on dominating conditions.
  BasicBlock *Parent = Cmp.getParent();
  BasicBlock *Dom = Parent->getSinglePredecessor();
  auto *BI = Dom ? dyn_cast<BranchInst>(Dom->getTerminator()) : nullptr;
  ICmpInst::Predicate Pred2;
  BasicBlock *TrueBB, *FalseBB;
  ConstantInt *CI2;
  if (BI && match(BI, m_Br(m_ICmp(Pred2, m_Specific(X), m_ConstantInt(CI2)),
                           TrueBB, FalseBB)) &&
      TrueBB != FalseBB) {
    ConstantRange CR =
        ConstantRange::makeAllowedICmpRegion(Pred, CI->getValue());
    ConstantRange DominatingCR =
        (Parent == TrueBB)
            ? ConstantRange::makeExactICmpRegion(Pred2, CI2->getValue())
            : ConstantRange::makeExactICmpRegion(
                  CmpInst::getInversePredicate(Pred2), CI2->getValue());
    ConstantRange Intersection = DominatingCR.intersectWith(CR);
    ConstantRange Difference = DominatingCR.difference(CR);
    if (Intersection.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder->getFalse());
    if (Difference.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder->getTrue());

    // If this is a normal comparison, it demands all bits. If it is a sign
    // bit comparison, it only demands the sign bit.
    bool UnusedBit;
    bool IsSignBit = isSignBitCheck(Pred, CI->getValue(), UnusedBit);

    // Canonicalizing a sign bit comparison that gets used in a branch,
    // pessimizes codegen by generating branch on zero instruction instead
    // of a test and branch. So we avoid canonicalizing in such situations
    // because test and branch instruction has better branch displacement
    // than compare and branch instruction.
    if (!isBranchOnSignBitCheck(Cmp, IsSignBit) && !Cmp.isEquality()) {
      if (auto *AI = Intersection.getSingleElement())
        return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder->getInt(*AI));
      if (auto *AD = Difference.getSingleElement())
        return new ICmpInst(ICmpInst::ICMP_NE, X, Builder->getInt(*AD));
    }
  }

  return nullptr;
}
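// Illustrative example: if a block is only reached when "%x u< 10" and it
// tests "%x u< 20", the dominating range [0, 10) lies entirely inside the
// compare's allowed region, so the Difference set is empty and the dominated
// compare folds to true; disjoint ranges would make the Intersection empty
// and fold it to false.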
/// Fold icmp (trunc X, Y), C.
Instruction *InstCombiner::foldICmpTruncConstant(ICmpInst &Cmp,
                                                 Instruction *Trunc,
                                                 const APInt *C) {
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Trunc->getOperand(0);
  if (*C == 1 && C->getBitWidth() > 1) {
    // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
    Value *V = nullptr;
    if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
      return new ICmpInst(ICmpInst::ICMP_SLT, V,
                          ConstantInt::get(V->getType(), 1));
  }

  if (Cmp.isEquality() && Trunc->hasOneUse()) {
    // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
    // of the high bits truncated out of x are known.
    unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
             SrcBits = X->getType()->getScalarSizeInBits();
    KnownBits Known = computeKnownBits(X, 0, &Cmp);

    // If all the high bits are known, we can do this xform.
    if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
      // Pull in the high bits from known-ones set.
      APInt NewRHS = C->zext(SrcBits);
      NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
    }
  }

  return nullptr;
}
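// Worked example (illustrative): if the top 24 bits of the i32 %x are known
// to be 0x000001, then
//   icmp eq i8 (trunc i32 %x to i8), 42
// widens to "icmp eq i32 %x, 0x0000012A" and the trunc goes away.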
/// Fold icmp (xor X, Y), C.
Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp,
                                               BinaryOperator *Xor,
                                               const APInt *C) {
  Value *X = Xor->getOperand(0);
  Value *Y = Xor->getOperand(1);
  const APInt *XorC;
  if (!match(Y, m_APInt(XorC)))
    return nullptr;

  // If this is a comparison that tests the signbit (X < 0) or (x > -1),
  // fold the xor.
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  if ((Pred == ICmpInst::ICMP_SLT && *C == 0) ||
      (Pred == ICmpInst::ICMP_SGT && C->isAllOnesValue())) {

    // If the sign bit of the XorCst is not set, there is no change to
    // the operation, just stop using the Xor.
    if (!XorC->isNegative()) {
      Cmp.setOperand(0, X);
      Worklist.Add(Xor);
      return &Cmp;
    }

    // Was the old condition true if the operand is positive?
    bool isTrueIfPositive = Pred == ICmpInst::ICMP_SGT;

    // If so, the new one isn't.
    isTrueIfPositive ^= true;

    Constant *CmpConstant = cast<Constant>(Cmp.getOperand(1));
    if (isTrueIfPositive)
      return new ICmpInst(ICmpInst::ICMP_SGT, X, SubOne(CmpConstant));
    else
      return new ICmpInst(ICmpInst::ICMP_SLT, X, AddOne(CmpConstant));
  }

  if (Xor->hasOneUse()) {
    // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
    if (!Cmp.isEquality() && XorC->isSignMask()) {
      Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
                            : Cmp.getSignedPredicate();
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), *C ^ *XorC));
    }

    // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
    if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
      Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
                            : Cmp.getSignedPredicate();
      Pred = Cmp.getSwappedPredicate(Pred);
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), *C ^ *XorC));
    }
  }

  // (icmp ugt (xor X, C), ~C) -> (icmp ult X, C)
  //   iff -C is a power of 2
  if (Pred == ICmpInst::ICMP_UGT && *XorC == ~(*C) && (*C + 1).isPowerOf2())
    return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);

  // (icmp ult (xor X, C), -C) -> (icmp uge X, C)
  //   iff -C is a power of 2
  if (Pred == ICmpInst::ICMP_ULT && *XorC == -(*C) && C->isPowerOf2())
    return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);

  return nullptr;
}
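// Worked examples (illustrative): flipping the sign bit reverses a signed
// test, so for i8
//   icmp slt (xor i8 %x, -128), 0  -->  icmp sgt i8 %x, -1
// and xoring with the sign mask swaps signedness of a relational compare:
//   icmp ult (xor i8 %x, -128), C  -->  icmp slt i8 %x, (C ^ -128)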
1562 /// Fold icmp (and (sh X, Y), C2), C1.
1563 Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
1564 const APInt *C1, const APInt *C2) {
1565 BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
1566 if (!Shift || !Shift->isShift())
1569 // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
1570 // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
1571 // code produced by the clang front-end, for bitfield access.
1572 // This seemingly simple opportunity to fold away a shift turns out to be
1573 // rather complicated. See PR17827 for details.
1574 unsigned ShiftOpcode = Shift->getOpcode();
1575 bool IsShl = ShiftOpcode == Instruction::Shl;
1577 if (match(Shift->getOperand(1), m_APInt(C3))) {
1578 bool CanFold = false;
1579 if (ShiftOpcode == Instruction::AShr) {
1580 // There may be some constraints that make this possible, but nothing
1581 // simple has been discovered yet.
1583 } else if (ShiftOpcode == Instruction::Shl) {
1584 // For a left shift, we can fold if the comparison is not signed. We can
1585 // also fold a signed comparison if the mask value and comparison value
1586 // are not negative. These constraints may not be obvious, but we can
1587 // prove that they are correct using an SMT solver.
1588 if (!Cmp.isSigned() || (!C2->isNegative() && !C1->isNegative()))
1590 } else if (ShiftOpcode == Instruction::LShr) {
1591 // For a logical right shift, we can fold if the comparison is not signed.
1592 // We can also fold a signed comparison if the shifted mask value and the
1593 // shifted comparison value are not negative. These constraints may not be
1594 // obvious, but we can prove that they are correct using an SMT solver.
1595 if (!Cmp.isSigned() ||
1596 (!C2->shl(*C3).isNegative() && !C1->shl(*C3).isNegative()))
1597 CanFold = true;
1598 }
1600 if (CanFold) {
1601 APInt NewCst = IsShl ? C1->lshr(*C3) : C1->shl(*C3);
1602 APInt SameAsC1 = IsShl ? NewCst.shl(*C3) : NewCst.lshr(*C3);
1603 // Check to see if we are shifting out any of the bits being compared.
1604 if (SameAsC1 != *C1) {
1605 // If we shifted bits out, the fold is not going to work out. As a
1606 // special case, check to see if this means that the result is always
1607 // true or false now.
1608 if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
1609 return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
1610 if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
1611 return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
1613 Cmp.setOperand(1, ConstantInt::get(And->getType(), NewCst));
1614 APInt NewAndCst = IsShl ? C2->lshr(*C3) : C2->shl(*C3);
1615 And->setOperand(1, ConstantInt::get(And->getType(), NewAndCst));
1616 And->setOperand(0, Shift->getOperand(0));
1617 Worklist.Add(Shift); // Shift is dead.
1618 return &Cmp;
1619 }
1620 }
1623 // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is
1624 // preferable because it allows the C2 << Y expression to be hoisted out of a
1625 // loop if Y is invariant and X is not.
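// Worked example (illustrative, %n loop-invariant):
//   icmp eq (and (lshr i32 %x, %n), 4), 0
//   --> icmp eq (and i32 %x, (shl i32 4, %n)), 0   ; shl 4, %n is hoistable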
1626 if (Shift->hasOneUse() && *C1 == 0 && Cmp.isEquality() &&
1627 !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
1628 // Compute C2 << Y.
1629 Value *NewShift =
1630 IsShl ? Builder->CreateLShr(And->getOperand(1), Shift->getOperand(1))
1631 : Builder->CreateShl(And->getOperand(1), Shift->getOperand(1));
1633 // Compute X & (C2 << Y).
1634 Value *NewAnd = Builder->CreateAnd(Shift->getOperand(0), NewShift);
1635 Cmp.setOperand(0, NewAnd);
1642 /// Fold icmp (and X, C2), C1.
1643 Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
1644 BinaryOperator *And,
1645 const APInt *C1) {
1646 const APInt *C2;
1647 if (!match(And->getOperand(1), m_APInt(C2)))
1650 if (!And->hasOneUse() || !And->getOperand(0)->hasOneUse())
1653 // If the LHS is an 'and' of a truncate and we can widen the and/compare to
1654 // the input width without changing the value produced, eliminate the cast:
1656 // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
1658 // We can do this transformation if the constants do not have their sign bits
1659 // set or if it is an equality comparison. Extending a relational comparison
1660 // when we're checking the sign bit would not work.
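// Worked example (illustrative, scalar only):
//   icmp eq (and (trunc i64 %w to i32), 255), 7
//   --> icmp eq (and i64 %w, 255), 7   ; both constants zero-extended to i64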
1661 Value *W;
1662 if (match(And->getOperand(0), m_Trunc(m_Value(W))) &&
1663 (Cmp.isEquality() || (!C1->isNegative() && !C2->isNegative()))) {
1664 // TODO: Is this a good transform for vectors? Wider types may reduce
1665 // throughput. Should this transform be limited (even for scalars) by using
1666 // shouldChangeType()?
1667 if (!Cmp.getType()->isVectorTy()) {
1668 Type *WideType = W->getType();
1669 unsigned WideScalarBits = WideType->getScalarSizeInBits();
1670 Constant *ZextC1 = ConstantInt::get(WideType, C1->zext(WideScalarBits));
1671 Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1672 Value *NewAnd = Builder->CreateAnd(W, ZextC2, And->getName());
1673 return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1677 if (Instruction *I = foldICmpAndShift(Cmp, And, C1, C2))
1680 // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
1681 // (icmp pred (and A, (or (shl 1, B), 1)), 0)
1683 // iff pred isn't signed
1684 if (!Cmp.isSigned() && *C1 == 0 && match(And->getOperand(1), m_One())) {
1685 Constant *One = cast<Constant>(And->getOperand(1));
1686 Value *Or = And->getOperand(0);
1687 Value *A, *B, *LShr;
1688 if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1689 match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1690 unsigned UsesRemoved = 0;
1691 if (And->hasOneUse())
1692 ++UsesRemoved;
1693 if (Or->hasOneUse())
1694 ++UsesRemoved;
1695 if (LShr->hasOneUse())
1696 ++UsesRemoved;
1698 // Compute A & ((1 << B) | 1)
1699 Value *NewOr = nullptr;
1700 if (auto *C = dyn_cast<Constant>(B)) {
1701 if (UsesRemoved >= 1)
1702 NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
1703 } else {
1704 if (UsesRemoved >= 3)
1705 NewOr = Builder->CreateOr(Builder->CreateShl(One, B, LShr->getName(),
1706 /*HasNUW=*/true),
1707 One, Or->getName());
1708 }
1709 if (NewOr) {
1710 Value *NewAnd = Builder->CreateAnd(A, NewOr, And->getName());
1711 Cmp.setOperand(0, NewAnd);
1712 return &Cmp;
1713 }
1714 }
1715 }
1717 // (X & C2) > C1 --> (X & C2) != 0, if any bit set in (X & C2) will produce a
1718 // result greater than C1.
1719 unsigned NumTZ = C2->countTrailingZeros();
1720 if (Cmp.getPredicate() == ICmpInst::ICMP_UGT && NumTZ < C2->getBitWidth() &&
1721 APInt::getOneBitSet(C2->getBitWidth(), NumTZ).ugt(*C1)) {
1722 Constant *Zero = Constant::getNullValue(And->getType());
1723 return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
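// Worked example (illustrative): icmp ugt (and i32 %x, 12), 3
// --> icmp ne (and i32 %x, 12), 0, because the smallest nonzero value of
// (%x & 12) is 4, and 4 u> 3 already holds.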
1729 /// Fold icmp (and X, Y), C.
1730 Instruction *InstCombiner::foldICmpAndConstant(ICmpInst &Cmp,
1731 BinaryOperator *And,
1733 if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1736 // TODO: These all require that Y is constant too, so refactor with the above.
1738 // Try to optimize things like "A[i] & 42 == 0" to index computations.
1739 Value *X = And->getOperand(0);
1740 Value *Y = And->getOperand(1);
1741 if (auto *LI = dyn_cast<LoadInst>(X))
1742 if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1743 if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
1744 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
1745 !LI->isVolatile() && isa<ConstantInt>(Y)) {
1746 ConstantInt *C2 = cast<ConstantInt>(Y);
1747 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2))
1751 if (!Cmp.isEquality())
1754 // X & -C == -C -> X >u ~C
1755 // X & -C != -C -> X <=u ~C
1756 // iff C is a power of 2
1757 if (Cmp.getOperand(1) == Y && (-(*C)).isPowerOf2()) {
1758 auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT
1759 : CmpInst::ICMP_ULE;
1760 return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
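// Worked example (illustrative, i8): icmp eq (and i8 %x, -8), -8
// --> icmp ugt i8 %x, -9   ; the top five bits are all set iff %x >=u 0xF8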
1763 // (X & C2) == 0 -> (trunc X) >= 0
1764 // (X & C2) != 0 -> (trunc X) < 0
1765 // iff C2 is a power of 2 and it masks the sign bit of a legal integer type.
1766 const APInt *C2;
1767 if (And->hasOneUse() && *C == 0 && match(Y, m_APInt(C2))) {
1768 int32_t ExactLogBase2 = C2->exactLogBase2();
1769 if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) {
1770 Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
1771 if (And->getType()->isVectorTy())
1772 NTy = VectorType::get(NTy, And->getType()->getVectorNumElements());
1773 Value *Trunc = Builder->CreateTrunc(X, NTy);
1774 auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
1775 : CmpInst::ICMP_SLT;
1776 return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy));
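// Worked example (illustrative, assuming i16 is legal for the target):
//   icmp ne (and i32 %x, 32768), 0    ; 32768 == 1 << 15
//   --> icmp slt (trunc i32 %x to i16), 0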
1783 /// Fold icmp (or X, Y), C.
1784 Instruction *InstCombiner::foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
1785 const APInt *C) {
1786 ICmpInst::Predicate Pred = Cmp.getPredicate();
1788 // icmp slt signum(V), 1 --> icmp slt V, 1
1789 Value *V;
1790 if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
1791 return new ICmpInst(ICmpInst::ICMP_SLT, V,
1792 ConstantInt::get(V->getType(), 1));
1795 // X | C == C --> X <=u C
1796 // X | C != C --> X >u C
1797 // iff C+1 is a power of 2 (C is a bitmask of the low bits)
1798 if (Cmp.isEquality() && Cmp.getOperand(1) == Or->getOperand(1) &&
1799 (*C + 1).isPowerOf2()) {
1800 Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
1801 return new ICmpInst(Pred, Or->getOperand(0), Or->getOperand(1));
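// Worked example (illustrative): icmp eq (or i32 %x, 7), 7
// --> icmp ule i32 %x, 7   ; %x may only contribute bits inside the mask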
1804 if (!Cmp.isEquality() || *C != 0 || !Or->hasOneUse())
1807 Value *P, *Q;
1808 if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
1809 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
1810 // -> and (icmp eq P, null), (icmp eq Q, null).
1811 Value *CmpP =
1812 Builder->CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
1813 Value *CmpQ =
1814 Builder->CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
1815 auto LogicOpc = Pred == ICmpInst::Predicate::ICMP_EQ ? Instruction::And
1816 : Instruction::Or;
1817 return BinaryOperator::Create(LogicOpc, CmpP, CmpQ);
1823 /// Fold icmp (mul X, Y), C.
1824 Instruction *InstCombiner::foldICmpMulConstant(ICmpInst &Cmp,
1825 BinaryOperator *Mul,
1826 const APInt *C) {
1827 const APInt *MulC;
1828 if (!match(Mul->getOperand(1), m_APInt(MulC)))
1831 // If this is a test of the sign bit and the multiply is sign-preserving with
1832 // a constant operand, use the multiply LHS operand instead.
1833 ICmpInst::Predicate Pred = Cmp.getPredicate();
1834 if (isSignTest(Pred, *C) && Mul->hasNoSignedWrap()) {
1835 if (MulC->isNegative())
1836 Pred = ICmpInst::getSwappedPredicate(Pred);
1837 return new ICmpInst(Pred, Mul->getOperand(0),
1838 Constant::getNullValue(Mul->getType()));
1844 /// Fold icmp (shl 1, Y), C.
1845 static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl,
1846 const APInt *C) {
1847 Value *Y;
1848 if (!match(Shl, m_Shl(m_One(), m_Value(Y))))
1851 Type *ShiftType = Shl->getType();
1852 uint32_t TypeBits = C->getBitWidth();
1853 bool CIsPowerOf2 = C->isPowerOf2();
1854 ICmpInst::Predicate Pred = Cmp.getPredicate();
1855 if (Cmp.isUnsigned()) {
1856 // (1 << Y) pred C -> Y pred Log2(C)
1857 if (!CIsPowerOf2) {
1858 // (1 << Y) < 30 -> Y <= 4
1859 // (1 << Y) <= 30 -> Y <= 4
1860 // (1 << Y) >= 30 -> Y > 4
1861 // (1 << Y) > 30 -> Y > 4
1862 if (Pred == ICmpInst::ICMP_ULT)
1863 Pred = ICmpInst::ICMP_ULE;
1864 else if (Pred == ICmpInst::ICMP_UGE)
1865 Pred = ICmpInst::ICMP_UGT;
1866 }
1868 // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31
1869 // (1 << Y) < 2147483648 -> Y < 31 -> Y != 31
1870 unsigned CLog2 = C->logBase2();
1871 if (CLog2 == TypeBits - 1) {
1872 if (Pred == ICmpInst::ICMP_UGE)
1873 Pred = ICmpInst::ICMP_EQ;
1874 else if (Pred == ICmpInst::ICMP_ULT)
1875 Pred = ICmpInst::ICMP_NE;
1876 }
1877 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
1878 } else if (Cmp.isSigned()) {
1879 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
1880 if (C->isAllOnesValue()) {
1881 // (1 << Y) <= -1 -> Y == 31
1882 if (Pred == ICmpInst::ICMP_SLE)
1883 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
1885 // (1 << Y) > -1 -> Y != 31
1886 if (Pred == ICmpInst::ICMP_SGT)
1887 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
1889 // (1 << Y) < 0 -> Y == 31
1890 // (1 << Y) <= 0 -> Y == 31
1891 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
1892 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
1894 // (1 << Y) >= 0 -> Y != 31
1895 // (1 << Y) > 0 -> Y != 31
1896 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
1897 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
1899 } else if (Cmp.isEquality() && CIsPowerOf2) {
1900 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C->logBase2()));
1906 /// Fold icmp (shl X, Y), C.
1907 Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp,
1908 BinaryOperator *Shl,
1909 const APInt *C) {
1910 const APInt *ShiftVal;
1911 if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
1912 return foldICmpShlConstConst(Cmp, Shl->getOperand(1), *C, *ShiftVal);
1914 const APInt *ShiftAmt;
1915 if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
1916 return foldICmpShlOne(Cmp, Shl, C);
1918 // Check that the shift amount is in range. If not, don't perform undefined
1919 // shifts. When the shift is visited, it will be simplified.
1920 unsigned TypeBits = C->getBitWidth();
1921 if (ShiftAmt->uge(TypeBits))
1924 ICmpInst::Predicate Pred = Cmp.getPredicate();
1925 Value *X = Shl->getOperand(0);
1926 Type *ShType = Shl->getType();
1928 // NSW guarantees that we are only shifting out sign bits from the high bits,
1929 // so we can ASHR the compare constant without needing a mask and eliminate
1930 // the shift.
1931 if (Shl->hasNoSignedWrap()) {
1932 if (Pred == ICmpInst::ICMP_SGT) {
1933 // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
1934 APInt ShiftedC = C->ashr(*ShiftAmt);
1935 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
1937 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) {
1938 // This is the same code as the SGT case, but assert the pre-condition
1939 // that is needed for this to work with equality predicates.
1940 assert(C->ashr(*ShiftAmt).shl(*ShiftAmt) == *C &&
1941 "Compare known true or false was not folded");
1942 APInt ShiftedC = C->ashr(*ShiftAmt);
1943 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
1945 if (Pred == ICmpInst::ICMP_SLT) {
1946 // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
1947 // (X << S) <=s C is equiv to X <=s (C >> S) for all C
1948 // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
1949 // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
1950 assert(!C->isMinSignedValue() && "Unexpected icmp slt");
1951 APInt ShiftedC = (*C - 1).ashr(*ShiftAmt) + 1;
1952 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
1954 // If this is a signed comparison to 0 and the shift is sign preserving,
1955 // use the shift LHS operand instead; isSignTest may change 'Pred', so only
1956 // do that if we're sure to not continue on in this function.
1957 if (isSignTest(Pred, *C))
1958 return new ICmpInst(Pred, X, Constant::getNullValue(ShType));
1961 // NUW guarantees that we are only shifting out zero bits from the high bits,
1962 // so we can LSHR the compare constant without needing a mask and eliminate
1963 // the shift.
1964 if (Shl->hasNoUnsignedWrap()) {
1965 if (Pred == ICmpInst::ICMP_UGT) {
1966 // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
1967 APInt ShiftedC = C->lshr(*ShiftAmt);
1968 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
1970 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) {
1971 // This is the same code as the UGT case, but assert the pre-condition
1972 // that is needed for this to work with equality predicates.
1973 assert(C->lshr(*ShiftAmt).shl(*ShiftAmt) == *C &&
1974 "Compare known true or false was not folded");
1975 APInt ShiftedC = C->lshr(*ShiftAmt);
1976 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
1978 if (Pred == ICmpInst::ICMP_ULT) {
1979 // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
1980 // (X << S) <=u C is equiv to X <=u (C >> S) for all C
1981 // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
1982 // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
1983 assert(C->ugt(0) && "ult 0 should have been eliminated");
1984 APInt ShiftedC = (*C - 1).lshr(*ShiftAmt) + 1;
1985 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
1989 if (Cmp.isEquality() && Shl->hasOneUse()) {
1990 // Strength-reduce the shift into an 'and'.
1991 Constant *Mask = ConstantInt::get(
1992 ShType,
1993 APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
1994 Value *And = Builder->CreateAnd(X, Mask, Shl->getName() + ".mask");
1995 Constant *LShrC = ConstantInt::get(ShType, C->lshr(*ShiftAmt));
1996 return new ICmpInst(Pred, And, LShrC);
1999 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
2000 bool TrueIfSigned = false;
2001 if (Shl->hasOneUse() && isSignBitCheck(Pred, *C, TrueIfSigned)) {
2002 // (X << 31) <s 0 --> (X & 1) != 0
2003 Constant *Mask = ConstantInt::get(
2004 ShType,
2005 APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
2006 Value *And = Builder->CreateAnd(X, Mask, Shl->getName() + ".mask");
2007 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
2008 And, Constant::getNullValue(ShType));
2011 // Transform (icmp pred iM (shl iM %v, N), C)
2012 // -> (icmp pred i(M-N) (trunc iM %v to i(M-N)), (trunc (C>>N)))
2013 // Transform the shl to a trunc if (trunc (C>>N)) has no loss and i(M-N) is legal.
2014 // This enables us to get rid of the shift in favor of a trunc that may be
2015 // free on the target. It has the additional benefit of comparing to a
2016 // smaller constant that may be more target-friendly.
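// Worked example (illustrative, assuming i16 is legal for the target):
//   icmp eq (shl i32 %v, 16), 327680   ; 327680 == 5 << 16
//   --> icmp eq (trunc i32 %v to i16), 5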
2017 unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
2018 if (Shl->hasOneUse() && Amt != 0 && C->countTrailingZeros() >= Amt &&
2019 DL.isLegalInteger(TypeBits - Amt)) {
2020 Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
2021 if (ShType->isVectorTy())
2022 TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements());
2023 Constant *NewC =
2024 ConstantInt::get(TruncTy, C->ashr(*ShiftAmt).trunc(TypeBits - Amt));
2025 return new ICmpInst(Pred, Builder->CreateTrunc(X, TruncTy), NewC);
2031 /// Fold icmp ({al}shr X, Y), C.
2032 Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp,
2033 BinaryOperator *Shr,
2034 const APInt *C) {
2035 // An exact shr only shifts out zero bits, so:
2036 // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
2037 Value *X = Shr->getOperand(0);
2038 CmpInst::Predicate Pred = Cmp.getPredicate();
2039 if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() && *C == 0)
2040 return new ICmpInst(Pred, X, Cmp.getOperand(1));
2042 const APInt *ShiftVal;
2043 if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal)))
2044 return foldICmpShrConstConst(Cmp, Shr->getOperand(1), *C, *ShiftVal);
2046 const APInt *ShiftAmt;
2047 if (!match(Shr->getOperand(1), m_APInt(ShiftAmt)))
2050 // Check that the shift amount is in range. If not, don't perform undefined
2051 // shifts. When the shift is visited it will be simplified.
2052 unsigned TypeBits = C->getBitWidth();
2053 unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits);
2054 if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2057 bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2058 if (!Cmp.isEquality()) {
2059 // If we have an unsigned comparison and an ashr, we can't simplify this.
2060 // Similarly for signed comparisons with lshr.
2061 if (Cmp.isSigned() != IsAShr)
2064 // Otherwise, all lshr and most exact ashr's are equivalent to a udiv/sdiv
2065 // by a power of 2. Since we already have logic to simplify these,
2066 // transform to div and then simplify the resultant comparison.
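// Worked example (illustrative): icmp ult (lshr i32 %x, 2), 10 becomes
// icmp ult (udiv i32 %x, 4), 10, which the div logic below turns into a
// range check.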
2067 if (IsAShr && (!Shr->isExact() || ShAmtVal == TypeBits - 1))
2070 // Revisit the shift (to delete it).
2071 Worklist.Add(Shr);
2073 Constant *DivCst = ConstantInt::get(
2074 Shr->getType(), APInt::getOneBitSet(TypeBits, ShAmtVal));
2076 Value *Tmp = IsAShr ? Builder->CreateSDiv(X, DivCst, "", Shr->isExact())
2077 : Builder->CreateUDiv(X, DivCst, "", Shr->isExact());
2079 Cmp.setOperand(0, Tmp);
2081 // If the builder folded the binop, just return it.
2082 BinaryOperator *TheDiv = dyn_cast<BinaryOperator>(Tmp);
2083 if (!TheDiv)
2084 return &Cmp;
2086 // Otherwise, fold this div/compare.
2087 assert(TheDiv->getOpcode() == Instruction::SDiv ||
2088 TheDiv->getOpcode() == Instruction::UDiv);
2090 Instruction *Res = foldICmpDivConstant(Cmp, TheDiv, C);
2091 assert(Res && "This div/cst should have folded!");
2092 return Res;
2093 }
2095 // Handle equality comparisons of shift-by-constant.
2097 // If the comparison constant changes with the shift, the comparison cannot
2098 // succeed (bits of the comparison constant cannot match the shifted value).
2099 // This should be known by InstSimplify and already be folded to true/false.
2100 assert(((IsAShr && C->shl(ShAmtVal).ashr(ShAmtVal) == *C) ||
2101 (!IsAShr && C->shl(ShAmtVal).lshr(ShAmtVal) == *C)) &&
2102 "Expected icmp+shr simplify did not occur.");
2104 // Check if the bits shifted out are known to be zero. If so, we can compare
2105 // against the unshifted value:
2106 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
2107 Constant *ShiftedCmpRHS = ConstantInt::get(Shr->getType(), *C << ShAmtVal);
2108 if (Shr->hasOneUse()) {
2109 if (Shr->isExact())
2110 return new ICmpInst(Pred, X, ShiftedCmpRHS);
2112 // Otherwise strength reduce the shift into an 'and'.
2113 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
2114 Constant *Mask = ConstantInt::get(Shr->getType(), Val);
2115 Value *And = Builder->CreateAnd(X, Mask, Shr->getName() + ".mask");
2116 return new ICmpInst(Pred, And, ShiftedCmpRHS);
2122 /// Fold icmp (udiv X, Y), C.
2123 Instruction *InstCombiner::foldICmpUDivConstant(ICmpInst &Cmp,
2124 BinaryOperator *UDiv,
2125 const APInt *C) {
2126 const APInt *C2;
2127 if (!match(UDiv->getOperand(0), m_APInt(C2)))
2130 assert(*C2 != 0 && "udiv 0, X should have been simplified already.");
2132 // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
2133 Value *Y = UDiv->getOperand(1);
2134 if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) {
2135 assert(!C->isMaxValue() &&
2136 "icmp ugt X, UINT_MAX should have been simplified already.");
2137 return new ICmpInst(ICmpInst::ICMP_ULE, Y,
2138 ConstantInt::get(Y->getType(), C2->udiv(*C + 1)));
2141 // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
2142 if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) {
2143 assert(*C != 0 && "icmp ult X, 0 should have been simplified already.");
2144 return new ICmpInst(ICmpInst::ICMP_UGT, Y,
2145 ConstantInt::get(Y->getType(), C2->udiv(*C)));
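// Worked example (illustrative): icmp ult (udiv i32 64, %y), 4
// --> icmp ugt i32 %y, 16   ; 64 /u %y is below 4 exactly when %y u> 64/4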
2151 /// Fold icmp ({su}div X, Y), C.
2152 Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
2153 BinaryOperator *Div,
2154 const APInt *C) {
2155 // Fold: icmp pred ([us]div X, C2), C -> range test
2156 // Fold this div into the comparison, producing a range check.
2157 // Determine, based on the divide type, what the range is being
2158 // checked. If there is an overflow on the low or high side, remember
2159 // it, otherwise compute the range [low, hi) bounding the new value.
2160 // See: InsertRangeTest above for the kinds of replacements possible.
2161 const APInt *C2;
2162 if (!match(Div->getOperand(1), m_APInt(C2)))
2165 // FIXME: If the operand types don't match the type of the divide
2166 // then don't attempt this transform. The code below doesn't have the
2167 // logic to deal with a signed divide and an unsigned compare (and
2168 // vice versa). This is because (x /s C2) <s C produces different
2169 // results than (x /s C2) <u C or (x /u C2) <s C or even
2170 // (x /u C2) <u C. Simply casting the operands and result won't
2171 // work. :( The if statement below tests that condition and bails
2173 bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2174 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2177 // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2178 // INT_MIN will also fail if the divisor is 1. Although folds of all these
2179 // division-by-constant cases should be present, we cannot assert that they
2180 // have happened before we reach this icmp instruction.
2181 if (*C2 == 0 || *C2 == 1 || (DivIsSigned && C2->isAllOnesValue()))
2184 // TODO: We could do all of the computations below using APInt.
2185 Constant *CmpRHS = cast<Constant>(Cmp.getOperand(1));
2186 Constant *DivRHS = cast<Constant>(Div->getOperand(1));
2188 // Compute Prod = CmpRHS * DivRHS. We are essentially solving an equation of
2189 // form X / C2 = C. We solve for X by multiplying C2 (DivRHS) and C (CmpRHS).
2190 // By solving for X, we can turn this into a range check instead of computing
2192 Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);
2194 // Determine if the product overflows by seeing if the product is not equal to
2195 // the divide. Make sure we do the same kind of divide as in the LHS
2196 // instruction that we're folding.
2197 bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS)
2198 : ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;
2200 ICmpInst::Predicate Pred = Cmp.getPredicate();
2202 // If the division is known to be exact, then there is no remainder from the
2203 // divide, so the covered range size is unit, otherwise it is the divisor.
2204 Constant *RangeSize =
2205 Div->isExact() ? ConstantInt::get(Div->getType(), 1) : DivRHS;
2207 // Figure out the interval that is being checked. For example, a comparison
2208 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2209 // Compute this interval based on the constants involved and the signedness of
2210 // the compare/divide. This computes a half-open interval, keeping track of
2211 // whether either value in the interval overflows. After analysis each
2212 // overflow variable is set to 0 if its corresponding bound variable is valid,
2213 // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
2214 int LoOverflow = 0, HiOverflow = 0;
2215 Constant *LoBound = nullptr, *HiBound = nullptr;
2217 if (!DivIsSigned) { // udiv
2218 // e.g. X/5 op 3 --> [15, 20)
2219 LoBound = Prod;
2220 HiOverflow = LoOverflow = ProdOV;
2221 if (!HiOverflow) {
2222 // If this is not an exact divide, then many values in the range collapse
2223 // to the same result value.
2224 HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
2225 }
2226 } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2227 if (*C == 0) { // (X / pos) op 0
2228 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
2229 LoBound = ConstantExpr::getNeg(SubOne(RangeSize));
2230 HiBound = RangeSize;
2231 } else if (C->isStrictlyPositive()) { // (X / pos) op pos
2232 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
2233 HiOverflow = LoOverflow = ProdOV;
2234 if (!HiOverflow)
2235 HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2236 } else { // (X / pos) op neg
2237 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
2238 HiBound = AddOne(Prod);
2239 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2240 if (!LoOverflow) {
2241 Constant *DivNeg = ConstantExpr::getNeg(RangeSize);
2242 LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
2243 }
2245 } else if (C2->isNegative()) { // Divisor is < 0.
2246 if (Div->isExact())
2247 RangeSize = ConstantExpr::getNeg(RangeSize);
2248 if (*C == 0) { // (X / neg) op 0
2249 // e.g. X/-5 op 0 --> [-4, 5)
2250 LoBound = AddOne(RangeSize);
2251 HiBound = ConstantExpr::getNeg(RangeSize);
2252 if (HiBound == DivRHS) { // -INTMIN = INTMIN
2253 HiOverflow = 1; // [INTMIN+1, overflow)
2254 HiBound = nullptr; // e.g. X/INTMIN = 0 --> X > INTMIN
2256 } else if (C->isStrictlyPositive()) { // (X / neg) op pos
2257 // e.g. X/-5 op 3 --> [-19, -14)
2258 HiBound = AddOne(Prod);
2259 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2260 if (!LoOverflow)
2261 LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1 : 0;
2262 } else { // (X / neg) op neg
2263 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
2264 LoOverflow = HiOverflow = ProdOV;
2265 if (!HiOverflow)
2266 HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
2269 // Dividing by a negative swaps the condition. LT <-> GT
2270 Pred = ICmpInst::getSwappedPredicate(Pred);
2273 Value *X = Div->getOperand(0);
2274 switch (Pred) {
2275 default: llvm_unreachable("Unhandled icmp opcode!");
2276 case ICmpInst::ICMP_EQ:
2277 if (LoOverflow && HiOverflow)
2278 return replaceInstUsesWith(Cmp, Builder->getFalse());
2279 if (HiOverflow)
2280 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2281 ICmpInst::ICMP_UGE, X, LoBound);
2282 if (LoOverflow)
2283 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2284 ICmpInst::ICMP_ULT, X, HiBound);
2285 return replaceInstUsesWith(
2286 Cmp, insertRangeTest(X, LoBound->getUniqueInteger(),
2287 HiBound->getUniqueInteger(), DivIsSigned, true));
2288 case ICmpInst::ICMP_NE:
2289 if (LoOverflow && HiOverflow)
2290 return replaceInstUsesWith(Cmp, Builder->getTrue());
2291 if (HiOverflow)
2292 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2293 ICmpInst::ICMP_ULT, X, LoBound);
2294 if (LoOverflow)
2295 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2296 ICmpInst::ICMP_UGE, X, HiBound);
2297 return replaceInstUsesWith(Cmp,
2298 insertRangeTest(X, LoBound->getUniqueInteger(),
2299 HiBound->getUniqueInteger(),
2300 DivIsSigned, false));
2301 case ICmpInst::ICMP_ULT:
2302 case ICmpInst::ICMP_SLT:
2303 if (LoOverflow == +1) // Low bound is greater than input range.
2304 return replaceInstUsesWith(Cmp, Builder->getTrue());
2305 if (LoOverflow == -1) // Low bound is less than input range.
2306 return replaceInstUsesWith(Cmp, Builder->getFalse());
2307 return new ICmpInst(Pred, X, LoBound);
2308 case ICmpInst::ICMP_UGT:
2309 case ICmpInst::ICMP_SGT:
2310 if (HiOverflow == +1) // High bound greater than input range.
2311 return replaceInstUsesWith(Cmp, Builder->getFalse());
2312 if (HiOverflow == -1) // High bound less than input range.
2313 return replaceInstUsesWith(Cmp, Builder->getTrue());
2314 if (Pred == ICmpInst::ICMP_UGT)
2315 return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
2316 return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
2322 /// Fold icmp (sub X, Y), C.
2323 Instruction *InstCombiner::foldICmpSubConstant(ICmpInst &Cmp,
2324 BinaryOperator *Sub,
2325 const APInt *C) {
2326 Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
2327 ICmpInst::Predicate Pred = Cmp.getPredicate();
2329 // The following transforms are only worth it if the only user of the subtract
2330 // is the icmp.
2331 if (!Sub->hasOneUse())
2334 if (Sub->hasNoSignedWrap()) {
2335 // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
2336 if (Pred == ICmpInst::ICMP_SGT && C->isAllOnesValue())
2337 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
2339 // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
2340 if (Pred == ICmpInst::ICMP_SGT && *C == 0)
2341 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
2343 // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
2344 if (Pred == ICmpInst::ICMP_SLT && *C == 0)
2345 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
2347 // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
2348 if (Pred == ICmpInst::ICMP_SLT && *C == 1)
2349 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
2352 const APInt *C2;
2353 if (!match(X, m_APInt(C2)))
2356 // C2 - Y <u C -> (Y | (C - 1)) == C2
2357 // iff (C2 & (C - 1)) == C - 1 and C is a power of 2
2358 if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() &&
2359 (*C2 & (*C - 1)) == (*C - 1))
2360 return new ICmpInst(ICmpInst::ICMP_EQ, Builder->CreateOr(Y, *C - 1), X);
2362 // C2 - Y >u C -> (Y | C) != C2
2363 // iff C2 & C == C and C + 1 is a power of 2
2364 if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == *C)
2365 return new ICmpInst(ICmpInst::ICMP_NE, Builder->CreateOr(Y, *C), X);
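// Worked example (illustrative, i8): icmp ult (sub i8 7, %y), 4
// --> icmp eq (or i8 %y, 3), 7   ; 7 - %y <u 4 exactly for %y in {4,5,6,7}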
2370 /// Fold icmp (add X, Y), C.
2371 Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp,
2372 BinaryOperator *Add,
2373 const APInt *C) {
2374 Value *Y = Add->getOperand(1);
2375 const APInt *C2;
2376 if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
2379 // Fold icmp pred (add X, C2), C.
2380 Value *X = Add->getOperand(0);
2381 Type *Ty = Add->getType();
2382 CmpInst::Predicate Pred = Cmp.getPredicate();
2384 // If the add does not wrap, we can always adjust the compare by subtracting
2385 // the constants. Equality comparisons are handled elsewhere. SGE/SLE are
2386 // canonicalized to SGT/SLT.
2387 if (Add->hasNoSignedWrap() &&
2388 (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) {
2389 bool Overflow;
2390 APInt NewC = C->ssub_ov(*C2, Overflow);
2391 // If there is overflow, the result must be true or false.
2392 // TODO: Can we assert there is no overflow because InstSimplify always
2393 // handles those cases?
2394 if (!Overflow)
2395 // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
2396 return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
2399 auto CR = ConstantRange::makeExactICmpRegion(Pred, *C).subtract(*C2);
2400 const APInt &Upper = CR.getUpper();
2401 const APInt &Lower = CR.getLower();
2402 if (Cmp.isSigned()) {
2403 if (Lower.isSignMask())
2404 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
2405 if (Upper.isSignMask())
2406 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
2408 if (Lower.isMinValue())
2409 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
2410 if (Upper.isMinValue())
2411 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
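// Worked example (illustrative, i8, no nsw on the add):
//   icmp slt (add i8 %x, 16), -112 has the exact region [-128, -112);
//   subtracting 16 gives [112, -128), whose upper end is the sign mask,
//   so this becomes icmp sge i8 %x, 112.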
2414 if (!Add->hasOneUse())
2417 // X+C2 <u C -> (X & -C) == -C2
2418 // iff C2 & (C-1) == 0
2419 // C is a power of 2
2420 if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() && (*C2 & (*C - 1)) == 0)
2421 return new ICmpInst(ICmpInst::ICMP_EQ, Builder->CreateAnd(X, -(*C)),
2422 ConstantExpr::getNeg(cast<Constant>(Y)));
2424 // X+C2 >u C -> (X & ~C) != -C2
2425 // iff C2 & C == 0
2426 // C+1 is a power of 2
2427 if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == 0)
2428 return new ICmpInst(ICmpInst::ICMP_NE, Builder->CreateAnd(X, ~(*C)),
2429 ConstantExpr::getNeg(cast<Constant>(Y)));
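// Worked example (illustrative, i8): icmp ult (add i8 %x, 8), 4
// --> icmp eq (and i8 %x, -4), -8   ; %x + 8 lands in [0, 4) exactly when
//                                   ; %x is in [-8, -4)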
2434 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C
2435 /// where X is some kind of instruction.
2436 Instruction *InstCombiner::foldICmpInstWithConstant(ICmpInst &Cmp) {
2437 const APInt *C;
2438 if (!match(Cmp.getOperand(1), m_APInt(C)))
2441 BinaryOperator *BO;
2442 if (match(Cmp.getOperand(0), m_BinOp(BO))) {
2443 switch (BO->getOpcode()) {
2444 case Instruction::Xor:
2445 if (Instruction *I = foldICmpXorConstant(Cmp, BO, C))
2448 case Instruction::And:
2449 if (Instruction *I = foldICmpAndConstant(Cmp, BO, C))
2452 case Instruction::Or:
2453 if (Instruction *I = foldICmpOrConstant(Cmp, BO, C))
2456 case Instruction::Mul:
2457 if (Instruction *I = foldICmpMulConstant(Cmp, BO, C))
2460 case Instruction::Shl:
2461 if (Instruction *I = foldICmpShlConstant(Cmp, BO, C))
2464 case Instruction::LShr:
2465 case Instruction::AShr:
2466 if (Instruction *I = foldICmpShrConstant(Cmp, BO, C))
2469 case Instruction::UDiv:
2470 if (Instruction *I = foldICmpUDivConstant(Cmp, BO, C))
2473 case Instruction::SDiv:
2474 if (Instruction *I = foldICmpDivConstant(Cmp, BO, C))
2477 case Instruction::Sub:
2478 if (Instruction *I = foldICmpSubConstant(Cmp, BO, C))
2481 case Instruction::Add:
2482 if (Instruction *I = foldICmpAddConstant(Cmp, BO, C))
2488 // TODO: These folds could be refactored to be part of the above calls.
2489 if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, C))
2493 Instruction *LHSI;
2494 if (match(Cmp.getOperand(0), m_Instruction(LHSI)) &&
2495 LHSI->getOpcode() == Instruction::Trunc)
2496 if (Instruction *I = foldICmpTruncConstant(Cmp, LHSI, C))
2499 if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, C))
2505 /// Fold an icmp equality instruction with binary operator LHS and constant RHS:
2506 /// icmp eq/ne BO, C.
2507 Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
2510 // TODO: Some of these folds could work with arbitrary constants, but this
2511 // function is limited to scalar and vector splat constants.
2512 if (!Cmp.isEquality())
2515 ICmpInst::Predicate Pred = Cmp.getPredicate();
2516 bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
2517 Constant *RHS = cast<Constant>(Cmp.getOperand(1));
2518 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
2520 switch (BO->getOpcode()) {
2521 case Instruction::SRem:
2522 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
2523 if (*C == 0 && BO->hasOneUse()) {
2524 const APInt *BOC;
2525 if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
2526 Value *NewRem = Builder->CreateURem(BOp0, BOp1, BO->getName());
2527 return new ICmpInst(Pred, NewRem,
2528 Constant::getNullValue(BO->getType()));
2532 case Instruction::Add: {
2533 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
2534 const APInt *BOC;
2535 if (match(BOp1, m_APInt(BOC))) {
2536 if (BO->hasOneUse()) {
2537 Constant *SubC = ConstantExpr::getSub(RHS, cast<Constant>(BOp1));
2538 return new ICmpInst(Pred, BOp0, SubC);
2540 } else if (*C == 0) {
2541 // Replace ((add A, B) != 0) with (A != -B) if A or B is
2542 // efficiently invertible, or if the add has just this one use.
2543 if (Value *NegVal = dyn_castNegVal(BOp1))
2544 return new ICmpInst(Pred, BOp0, NegVal);
2545 if (Value *NegVal = dyn_castNegVal(BOp0))
2546 return new ICmpInst(Pred, NegVal, BOp1);
2547 if (BO->hasOneUse()) {
2548 Value *Neg = Builder->CreateNeg(BOp1);
2550 return new ICmpInst(Pred, BOp0, Neg);
2555 case Instruction::Xor:
2556 if (BO->hasOneUse()) {
2557 if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
2558 // For the xor case, we can xor two constants together, eliminating
2559 // the explicit xor.
2560 return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
2561 } else if (*C == 0) {
2562 // Replace ((xor A, B) != 0) with (A != B)
2563 return new ICmpInst(Pred, BOp0, BOp1);
2567 case Instruction::Sub:
2568 if (BO->hasOneUse()) {
2569 const APInt *BOC;
2570 if (match(BOp0, m_APInt(BOC))) {
2571 // Replace ((sub BOC, B) != C) with (B != BOC-C).
2572 Constant *SubC = ConstantExpr::getSub(cast<Constant>(BOp0), RHS);
2573 return new ICmpInst(Pred, BOp1, SubC);
2574 } else if (*C == 0) {
2575 // Replace ((sub A, B) != 0) with (A != B).
2576 return new ICmpInst(Pred, BOp0, BOp1);
2580 case Instruction::Or: {
2581 const APInt *BOC;
2582 if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
2583 // Comparing if all bits outside of a constant mask are set?
2584 // Replace (X | C) == -1 with (X & ~C) == ~C.
2585 // This removes the -1 constant.
2586 Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
2587 Value *And = Builder->CreateAnd(BOp0, NotBOC);
2588 return new ICmpInst(Pred, And, NotBOC);
2592 case Instruction::And: {
2593 const APInt *BOC;
2594 if (match(BOp1, m_APInt(BOC))) {
2595 // If we have ((X & C) == C), turn it into ((X & C) != 0).
2596 if (*C == *BOC && C->isPowerOf2())
2597 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
2598 BO, Constant::getNullValue(RHS->getType()));
2600 // Don't perform the following transforms if the AND has multiple uses
2601 if (!BO->hasOneUse())
2604 // Replace ((and X, (1 << size(X)-1)) != 0) with X s< 0
2605 if (BOC->isSignMask()) {
2606 Constant *Zero = Constant::getNullValue(BOp0->getType());
2607 auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
2608 return new ICmpInst(NewPred, BOp0, Zero);
2611 // ((X & ~7) == 0) --> X u< 8
2612 if (*C == 0 && (~(*BOC) + 1).isPowerOf2()) {
2613 Constant *NegBOC = ConstantExpr::getNeg(cast<Constant>(BOp1));
2614 auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
2615 return new ICmpInst(NewPred, BOp0, NegBOC);
2620 case Instruction::Mul:
2621 if (*C == 0 && BO->hasNoSignedWrap()) {
2622 const APInt *BOC;
2623 if (match(BOp1, m_APInt(BOC)) && *BOC != 0) {
2624 // The trivial case (mul X, 0) is handled by InstSimplify.
2625 // General case : (mul X, C) != 0 iff X != 0
2626 // (mul X, C) == 0 iff X == 0
2627 return new ICmpInst(Pred, BOp0, Constant::getNullValue(RHS->getType()));
2631 case Instruction::UDiv:
2632 if (*C == 0) {
2633 // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
2634 auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
2635 return new ICmpInst(NewPred, BOp1, BOp0);
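// Worked example (illustrative): icmp eq (udiv i32 %a, %b), 0
// --> icmp ugt i32 %b, %a   ; an unsigned quotient is zero exactly when
//                           ; the divisor exceeds the dividend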
2644 /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
2645 Instruction *InstCombiner::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
2647 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0));
2648 if (!II || !Cmp.isEquality())
2651 // Handle icmp {eq|ne} <intrinsic>, intcst.
2652 switch (II->getIntrinsicID()) {
2653 case Intrinsic::bswap:
2655 Cmp.setOperand(0, II->getArgOperand(0));
2656 Cmp.setOperand(1, Builder->getInt(C->byteSwap()));
2658 case Intrinsic::ctlz:
2659 case Intrinsic::cttz:
2660 // ctz(A) == bitwidth(A) -> A == 0 and likewise for !=
2661 if (*C == C->getBitWidth()) {
2663 Cmp.setOperand(0, II->getArgOperand(0));
2664 Cmp.setOperand(1, ConstantInt::getNullValue(II->getType()));
2668 case Intrinsic::ctpop: {
2669 // popcount(A) == 0 -> A == 0 and likewise for !=
2670 // popcount(A) == bitwidth(A) -> A == -1 and likewise for !=
2671 bool IsZero = *C == 0;
2672 if (IsZero || *C == C->getBitWidth()) {
2674 Cmp.setOperand(0, II->getArgOperand(0));
2675 auto *NewOp = IsZero ? Constant::getNullValue(II->getType())
2676 : Constant::getAllOnesValue(II->getType());
2677 Cmp.setOperand(1, NewOp);
2688 /// Handle icmp with constant (but not simple integer constant) RHS.
2689 Instruction *InstCombiner::foldICmpInstWithConstantNotInt(ICmpInst &I) {
2690 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2691 Constant *RHSC = dyn_cast<Constant>(Op1);
2692 Instruction *LHSI = dyn_cast<Instruction>(Op0);
2693 if (!RHSC || !LHSI)
2694 return nullptr;
2696 switch (LHSI->getOpcode()) {
2697 case Instruction::GetElementPtr:
2698 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
2699 if (RHSC->isNullValue() &&
2700 cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
2701 return new ICmpInst(
2702 I.getPredicate(), LHSI->getOperand(0),
2703 Constant::getNullValue(LHSI->getOperand(0)->getType()));
2705 case Instruction::PHI:
2706 // Only fold icmp into the PHI if the phi and icmp are in the same
2707 // block. If in the same block, we're encouraging jump threading. If
2708 // not, we are just pessimizing the code by making an i1 phi.
2709 if (LHSI->getParent() == I.getParent())
2710 if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
2713 case Instruction::Select: {
2714 // If either operand of the select is a constant, we can fold the
2715 // comparison into the select arms, which will cause one to be
2716 // constant folded and the select turned into a bitwise or.
2717 Value *Op1 = nullptr, *Op2 = nullptr;
2718 ConstantInt *CI = nullptr;
2719 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
2720 Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
2721 CI = dyn_cast<ConstantInt>(Op1);
2723 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
2724 Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
2725 CI = dyn_cast<ConstantInt>(Op2);
2728 // We only want to perform this transformation if it will not lead to
2729 // additional code. This is true if either both sides of the select
2730 // fold to a constant (in which case the icmp is replaced with a select
2731 // which will usually simplify) or this is the only user of the
2732 // select (in which case we are trading a select+icmp for a simpler
2733 // select+icmp) or all uses of the select can be replaced based on
2734 // dominance information ("Global cases").
2735 bool Transform = false;
2736 if (Op1 && Op2)
2737 Transform = true;
2738 else if (Op1 || Op2) {
2739 // Local case
2740 if (LHSI->hasOneUse())
2741 Transform = true;
2742 // Global cases
2743 else if (CI && !CI->isZero())
2744 // When Op1 is constant try replacing select with second operand.
2745 // Otherwise Op2 is constant and try replacing select with first
2746 // operand.
2747 Transform =
2748 replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1);
2749 }
2750 if (Transform) {
2751 if (!Op1)
2752 Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC,
2753 I.getName());
2754 if (!Op2)
2755 Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC,
2756 I.getName());
2757 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
2758 }
2761 case Instruction::IntToPtr:
2762 // icmp pred inttoptr(X), null -> icmp pred X, 0
2763 if (RHSC->isNullValue() &&
2764 DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
2765 return new ICmpInst(
2766 I.getPredicate(), LHSI->getOperand(0),
2767 Constant::getNullValue(LHSI->getOperand(0)->getType()));
2770 case Instruction::Load:
2771 // Try to optimize things like "A[i] > 4" to index computations.
2772 if (GetElementPtrInst *GEP =
2773 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
2774 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
2775 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
2776 !cast<LoadInst>(LHSI)->isVolatile())
2777 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
2786 /// Try to fold icmp (binop), X or icmp X, (binop).
2787 /// TODO: A large part of this logic is duplicated in InstSimplify's
2788 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
2789 /// duplication.
2790 Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) {
2791 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2793 // Special logic for binary operators.
2794 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
2795 BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
2799 const CmpInst::Predicate Pred = I.getPredicate();
2800 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
2801 if (BO0 && isa<OverflowingBinaryOperator>(BO0))
2802 NoOp0WrapProblem =
2803 ICmpInst::isEquality(Pred) ||
2804 (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
2805 (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
2806 if (BO1 && isa<OverflowingBinaryOperator>(BO1))
2807 NoOp1WrapProblem =
2808 ICmpInst::isEquality(Pred) ||
2809 (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
2810 (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
2812 // Analyze the case when either Op0 or Op1 is an add instruction.
2813 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
2814 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2815 if (BO0 && BO0->getOpcode() == Instruction::Add) {
2816 A = BO0->getOperand(0);
2817 B = BO0->getOperand(1);
2819 if (BO1 && BO1->getOpcode() == Instruction::Add) {
2820 C = BO1->getOperand(0);
2821 D = BO1->getOperand(1);
2824 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
2825 if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
2826 return new ICmpInst(Pred, A == Op1 ? B : A,
2827 Constant::getNullValue(Op1->getType()));
2829 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
2830 if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
2831 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
2832 C == Op0 ? D : C);
2834 // icmp (X+Y), (X+Z) -> icmp Y, Z for equalities or if there is no overflow.
2835 if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
2837 // Try not to increase register pressure.
2838 BO0->hasOneUse() && BO1->hasOneUse()) {
2839 // Determine Y and Z in the form icmp (X+Y), (X+Z).
2840 Value *Y, *Z;
2841 if (A == C) {
2842 // C + B == C + D -> B == D
2843 Y = B;
2844 Z = D;
2845 } else if (A == D) {
2846 // D + B == C + D -> B == C
2847 Y = B;
2848 Z = C;
2849 } else if (B == C) {
2850 // A + C == C + D -> A == D
2851 Y = A;
2852 Z = D;
2853 } else {
2854 assert(B == D);
2855 // A + D == C + D -> A == C
2856 Y = A;
2857 Z = C;
2858 }
2859 return new ICmpInst(Pred, Y, Z);
2862 // icmp slt (X + -1), Y -> icmp sle X, Y
2863 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT &&
2864 match(B, m_AllOnes()))
2865 return new ICmpInst(CmpInst::ICMP_SLE, A, Op1);
2867 // icmp sge (X + -1), Y -> icmp sgt X, Y
2868 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE &&
2869 match(B, m_AllOnes()))
2870 return new ICmpInst(CmpInst::ICMP_SGT, A, Op1);
2872 // icmp sle (X + 1), Y -> icmp slt X, Y
2873 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One()))
2874 return new ICmpInst(CmpInst::ICMP_SLT, A, Op1);
2876 // icmp sgt (X + 1), Y -> icmp sge X, Y
2877 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One()))
2878 return new ICmpInst(CmpInst::ICMP_SGE, A, Op1);
2880 // icmp sgt X, (Y + -1) -> icmp sge X, Y
2881 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT &&
2882 match(D, m_AllOnes()))
2883 return new ICmpInst(CmpInst::ICMP_SGE, Op0, C);
2885 // icmp sle X, (Y + -1) -> icmp slt X, Y
2886 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE &&
2887 match(D, m_AllOnes()))
2888 return new ICmpInst(CmpInst::ICMP_SLT, Op0, C);
2890 // icmp sge X, (Y + 1) -> icmp sgt X, Y
2891 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One()))
2892 return new ICmpInst(CmpInst::ICMP_SGT, Op0, C);
2894 // icmp slt X, (Y + 1) -> icmp sle X, Y
2895 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One()))
2896 return new ICmpInst(CmpInst::ICMP_SLE, Op0, C);
2898 // TODO: The subtraction-related identities shown below also hold, but
2899 // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
2900 // wouldn't happen even if they were implemented.
2902 // icmp ult (X - 1), Y -> icmp ule X, Y
2903 // icmp uge (X - 1), Y -> icmp ugt X, Y
2904 // icmp ugt X, (Y - 1) -> icmp uge X, Y
2905 // icmp ule X, (Y - 1) -> icmp ult X, Y
2907 // icmp ule (X + 1), Y -> icmp ult X, Y
2908 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One()))
2909 return new ICmpInst(CmpInst::ICMP_ULT, A, Op1);
2911 // icmp ugt (X + 1), Y -> icmp uge X, Y
2912 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One()))
2913 return new ICmpInst(CmpInst::ICMP_UGE, A, Op1);
2915 // icmp uge X, (Y + 1) -> icmp ugt X, Y
2916 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One()))
2917 return new ICmpInst(CmpInst::ICMP_UGT, Op0, C);
2919 // icmp ult X, (Y + 1) -> icmp ule X, Y
2920 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One()))
2921 return new ICmpInst(CmpInst::ICMP_ULE, Op0, C);
2923 // if C1 has greater magnitude than C2:
2924 // icmp (X + C1), (Y + C2) -> icmp (X + C3), Y
2925 // s.t. C3 = C1 - C2
2927 // if C2 has greater magnitude than C1:
2928 // icmp (X + C1), (Y + C2) -> icmp X, (Y + C3)
2929 // s.t. C3 = C2 - C1
2930 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
2931 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned())
2932 if (ConstantInt *C1 = dyn_cast<ConstantInt>(B))
2933 if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) {
2934 const APInt &AP1 = C1->getValue();
2935 const APInt &AP2 = C2->getValue();
2936 if (AP1.isNegative() == AP2.isNegative()) {
2937 APInt AP1Abs = C1->getValue().abs();
2938 APInt AP2Abs = C2->getValue().abs();
2939 if (AP1Abs.uge(AP2Abs)) {
2940 ConstantInt *C3 = Builder->getInt(AP1 - AP2);
2941 Value *NewAdd = Builder->CreateNSWAdd(A, C3);
2942 return new ICmpInst(Pred, NewAdd, C);
2943 } else {
2944 ConstantInt *C3 = Builder->getInt(AP2 - AP1);
2945 Value *NewAdd = Builder->CreateNSWAdd(C, C3);
2946 return new ICmpInst(Pred, A, NewAdd);
2951 // Analyze the case when either Op0 or Op1 is a sub instruction.
2952 // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
2953 A = nullptr;
2954 B = nullptr;
2955 C = nullptr;
2956 D = nullptr;
2957 if (BO0 && BO0->getOpcode() == Instruction::Sub) {
2958 A = BO0->getOperand(0);
2959 B = BO0->getOperand(1);
2961 if (BO1 && BO1->getOpcode() == Instruction::Sub) {
2962 C = BO1->getOperand(0);
2963 D = BO1->getOperand(1);
2966 // icmp (X-Y), X -> icmp 0, Y for equalities or if there is no overflow.
2967 if (A == Op1 && NoOp0WrapProblem)
2968 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
2970 // icmp X, (X-Y) -> icmp Y, 0 for equalities or if there is no overflow.
2971 if (C == Op0 && NoOp1WrapProblem)
2972 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
2974 // icmp (Y-X), (Z-X) -> icmp Y, Z for equalities or if there is no overflow.
2975 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem &&
2976 // Try not to increase register pressure.
2977 BO0->hasOneUse() && BO1->hasOneUse())
2978 return new ICmpInst(Pred, A, C);
2980 // icmp (X-Y), (X-Z) -> icmp Z, Y for equalities or if there is no overflow.
2981 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem &&
2982 // Try not to increase register pressure.
2983 BO0->hasOneUse() && BO1->hasOneUse())
2984 return new ICmpInst(Pred, D, B);
2986 // icmp (0-X) < cst --> X > -cst
2987 if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
2988 Value *X;
2989 if (match(BO0, m_Neg(m_Value(X))))
2990 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1))
2991 if (!RHSC->isMinValue(/*isSigned=*/true))
2992 return new ICmpInst(I.getSwappedPredicate(), X,
2993 ConstantExpr::getNeg(RHSC));
2996 BinaryOperator *SRem = nullptr;
2997 // icmp (srem X, Y), Y
2998 if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
2999 SRem = BO0;
3000 // icmp Y, (srem X, Y)
3001 else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
3002 Op0 == BO1->getOperand(1))
3003 SRem = BO1;
3004 if (SRem) {
3005 // We don't check hasOneUse to avoid increasing register pressure because
3006 // the value we use is the same value this instruction was already using.
3007 switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
3010 case ICmpInst::ICMP_EQ:
3011 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
3012 case ICmpInst::ICMP_NE:
3013 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
3014 case ICmpInst::ICMP_SGT:
3015 case ICmpInst::ICMP_SGE:
3016 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
3017 Constant::getAllOnesValue(SRem->getType()));
3018 case ICmpInst::ICMP_SLT:
3019 case ICmpInst::ICMP_SLE:
3020 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
3021 Constant::getNullValue(SRem->getType()));
3025 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() &&
3026 BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) {
3027 switch (BO0->getOpcode()) {
3030 case Instruction::Add:
3031 case Instruction::Sub:
3032 case Instruction::Xor: {
3033 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
3034 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3036 const APInt *C;
3037 if (match(BO0->getOperand(1), m_APInt(C))) {
3038 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
3039 if (C->isSignMask()) {
3040 ICmpInst::Predicate NewPred =
3041 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate();
3042 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
3045 // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
3046 if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) {
3047 ICmpInst::Predicate NewPred =
3048 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate();
3049 NewPred = I.getSwappedPredicate(NewPred);
3050 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
3055 case Instruction::Mul: {
3056 if (!I.isEquality())
3057 break;
3059 const APInt *C;
3060 if (match(BO0->getOperand(1), m_APInt(C)) && *C != 0 && *C != 1) {
3061 // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask)
3062 // Mask = -1 >> count-trailing-zeros(C).
3063 if (unsigned TZs = C->countTrailingZeros()) {
3064 Constant *Mask = ConstantInt::get(
3065 BO0->getType(),
3066 APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
3067 Value *And1 = Builder->CreateAnd(BO0->getOperand(0), Mask);
3068 Value *And2 = Builder->CreateAnd(BO1->getOperand(0), Mask);
3069 return new ICmpInst(Pred, And1, And2);
3070 }
3071 // If there are no trailing zeros in the multiplier, just eliminate
3072 // the multiplies (no masking is needed):
3073 // icmp eq/ne (X * C), (Y * C) --> icmp eq/ne X, Y
3074 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3078 case Instruction::UDiv:
3079 case Instruction::LShr:
3080 if (I.isSigned() || !BO0->isExact() || !BO1->isExact())
3081 break;
3082 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3084 case Instruction::SDiv:
3085 if (!I.isEquality() || !BO0->isExact() || !BO1->isExact())
3086 break;
3087 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3089 case Instruction::AShr:
3090 if (!BO0->isExact() || !BO1->isExact())
3091 break;
3092 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3094 case Instruction::Shl: {
3095 bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap();
3096 bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap();
3097 if (!NUW && !NSW)
3098 break;
3099 if (!NSW && I.isSigned())
3100 break;
3101 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3107 // Transform A & (L - 1) `ult` L --> L != 0
3108 auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
3109 auto BitwiseAnd =
3110 m_CombineOr(m_And(m_Value(), LSubOne), m_And(LSubOne, m_Value()));
3112 if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) {
3113 auto *Zero = Constant::getNullValue(BO0->getType());
3114 return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
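// Worked example (illustrative): icmp ult (and i32 %a, (add i32 %l, -1)), %l
// --> icmp ne i32 %l, 0   ; the masked value is always u< %l unless %l == 0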
3121 /// Fold icmp Pred min|max(X, Y), X.
3122 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) {
3123 ICmpInst::Predicate Pred = Cmp.getPredicate();
3124 Value *Op0 = Cmp.getOperand(0);
3125 Value *X = Cmp.getOperand(1);
3127 // Canonicalize minimum or maximum operand to LHS of the icmp.
3128 if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) ||
3129 match(X, m_c_SMax(m_Specific(Op0), m_Value())) ||
3130 match(X, m_c_UMin(m_Specific(Op0), m_Value())) ||
3131 match(X, m_c_UMax(m_Specific(Op0), m_Value()))) {
3132 std::swap(Op0, X);
3133 Pred = Cmp.getSwappedPredicate();
3134 }
3136 Value *Y;
3137 if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) {
3138 // smin(X, Y) == X --> X s<= Y
3139 // smin(X, Y) s>= X --> X s<= Y
3140 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE)
3141 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
3143 // smin(X, Y) != X --> X s> Y
3144 // smin(X, Y) s< X --> X s> Y
3145 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT)
3146 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
3148 // These cases should be handled in InstSimplify:
3149 // smin(X, Y) s<= X --> true
3150 // smin(X, Y) s> X --> false
3154 if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) {
3155 // smax(X, Y) == X --> X s>= Y
3156 // smax(X, Y) s<= X --> X s>= Y
3157 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE)
3158 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
3160 // smax(X, Y) != X --> X s< Y
3161 // smax(X, Y) s> X --> X s< Y
3162 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT)
3163 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
3165 // These cases should be handled in InstSimplify:
3166 // smax(X, Y) s>= X --> true
3167 // smax(X, Y) s< X --> false
3171 if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) {
3172 // umin(X, Y) == X --> X u<= Y
3173 // umin(X, Y) u>= X --> X u<= Y
3174 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE)
3175 return new ICmpInst(ICmpInst::ICMP_ULE, X, Y);
3177 // umin(X, Y) != X --> X u> Y
3178 // umin(X, Y) u< X --> X u> Y
3179 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT)
3180 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
3182 // These cases should be handled in InstSimplify:
3183 // umin(X, Y) u<= X --> true
3184 // umin(X, Y) u> X --> false
3188 if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) {
3189 // umax(X, Y) == X --> X u>= Y
3190 // umax(X, Y) u<= X --> X u>= Y
3191 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE)
3192 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);
3194 // umax(X, Y) != X --> X u< Y
3195 // umax(X, Y) u> X --> X u< Y
3196 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT)
3197 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
3199 // These cases should be handled in InstSimplify:
3200 // umax(X, Y) u>= X --> true
3201 // umax(X, Y) u< X --> false
3208 Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
3209 if (!I.isEquality())
3212 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3213 Value *A, *B, *C, *D;
3214 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
3215 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
3216 Value *OtherVal = A == Op1 ? B : A;
3217 return new ICmpInst(I.getPredicate(), OtherVal,
3218 Constant::getNullValue(A->getType()));
3221 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
3222 // A^c1 == C^c2 --> A == C^(c1^c2)
3223 ConstantInt *C1, *C2;
3224 if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
3226 Constant *NC = Builder->getInt(C1->getValue() ^ C2->getValue());
3227 Value *Xor = Builder->CreateXor(C, NC);
3228 return new ICmpInst(I.getPredicate(), A, Xor);
3231 // A^B == A^D -> B == D
3232 if (A == C)
3233 return new ICmpInst(I.getPredicate(), B, D);
3234 if (A == D) // A^B == C^A -> B == C
3235 return new ICmpInst(I.getPredicate(), B, C);
3236 if (B == C) // A^B == B^D -> A == D
3237 return new ICmpInst(I.getPredicate(), A, D);
3238 if (B == D) // A^B == D^B -> A == C
3239 return new ICmpInst(I.getPredicate(), A, C);
3243 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
3244 // A == (A^B) -> B == 0
3245 Value *OtherVal = A == Op0 ? B : A;
3246 return new ICmpInst(I.getPredicate(), OtherVal,
3247 Constant::getNullValue(A->getType()));
3250 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
3251 if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
3252 match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
3253 Value *X = nullptr, *Y = nullptr, *Z = nullptr;
3254 if (A == C) {
3255 X = B; Y = D; Z = A;
3259 } else if (A == D) {
3260 X = B; Y = C; Z = A;
3263 } else if (B == C) {
3264 X = A; Y = D; Z = B;
3267 } else if (B == D) {
3268 X = A; Y = C; Z = B;
3271 }
3273 if (X) { // Build (X^Y) & Z
3274 Op1 = Builder->CreateXor(X, Y);
3275 Op1 = Builder->CreateAnd(Op1, Z);
3276 I.setOperand(0, Op1);
3277 I.setOperand(1, Constant::getNullValue(Op1->getType()));
3282 // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B)
3283 // and (B & (1<<X)-1) == (zext A) --> A == (trunc B)
3285 if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) &&
3286 match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) ||
3287 (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) &&
3288 match(Op1, m_ZExt(m_Value(A))))) {
3289 APInt Pow2 = Cst1->getValue() + 1;
3290 if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
3291 Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
3292 return new ICmpInst(I.getPredicate(), A,
3293 Builder->CreateTrunc(B, A->getType()));
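// Illustrative IR sketch of the zext/mask fold above (hypothetical values):
//   %z = zext i8 %a to i32
//   %m = and i32 %b, 255
//   %c = icmp eq i32 %z, %m
// -->
//   %t = trunc i32 %b to i8
//   %c = icmp eq i8 %a, %t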
3296 // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
3297 // For lshr and ashr pairs.
3298 if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) &&
3299 match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) ||
3300 (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) &&
3301 match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) {
3302 unsigned TypeBits = Cst1->getBitWidth();
3303 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
3304 if (ShAmt < TypeBits && ShAmt != 0) {
3305 ICmpInst::Predicate Pred = I.getPredicate() == ICmpInst::ICMP_NE
3306 ? ICmpInst::ICMP_UGE
3307 : ICmpInst::ICMP_ULT;
3308 Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted");
3309 APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
3310 return new ICmpInst(Pred, Xor, Builder->getInt(CmpVal));
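// Illustrative IR sketch of the shifted-pair fold above (hypothetical values):
//   %sa = lshr i32 %a, 4
//   %sb = lshr i32 %b, 4
//   %c  = icmp eq i32 %sa, %sb
// -->
//   %x = xor i32 %a, %b
//   %c = icmp ult i32 %x, 16    ; 16 == 1 << 4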
3314 // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
3315 if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
3316 match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
3317 unsigned TypeBits = Cst1->getBitWidth();
3318 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
3319 if (ShAmt < TypeBits && ShAmt != 0) {
3320 Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted");
3321 APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
3322 Value *And = Builder->CreateAnd(Xor, Builder->getInt(AndVal),
3323 I.getName() + ".mask");
3324 return new ICmpInst(I.getPredicate(), And,
3325 Constant::getNullValue(Cst1->getType()));
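// Illustrative IR sketch of the shl-pair fold above (hypothetical values):
//   %sa = shl i32 %a, 8
//   %sb = shl i32 %b, 8
//   %c  = icmp eq i32 %sa, %sb
// -->
//   %x = xor i32 %a, %b
//   %m = and i32 %x, 16777215   ; low 24 bits, i.e. ~0u >> 8
//   %c = icmp eq i32 %m, 0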
3329 // Transform "icmp eq (trunc (lshr X, cst1)), cst2" to
3330 // "icmp eq (and X, mask), (cst2 << cst1)"
3332 if (Op0->hasOneUse() &&
3333 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
3334 match(Op1, m_ConstantInt(Cst1)) &&
3335 // Only do this when A has multiple uses. This is most important to do
3336 // when it exposes other optimizations.
3337 !A->hasOneUse()) {
3338 unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
3340 if (ShAmt < ASize) {
3341 APInt MaskV =
3342 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
3343 MaskV <<= ShAmt;
3345 APInt CmpV = Cst1->getValue().zext(ASize);
3346 CmpV <<= ShAmt;
3348 Value *Mask = Builder->CreateAnd(A, Builder->getInt(MaskV));
3349 return new ICmpInst(I.getPredicate(), Mask, Builder->getInt(CmpV));
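// Illustrative IR sketch of the trunc(lshr) fold above, assuming %a is i64
// and has multiple uses (hypothetical values):
//   %s = lshr i64 %a, 16
//   %t = trunc i64 %s to i16
//   %c = icmp eq i16 %t, 5
// -->
//   %m = and i64 %a, 4294901760   ; 0xFFFF0000: a 16-bit mask shifted left 16
//   %c = icmp eq i64 %m, 327680   ; 5 << 16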
3356 /// Handle icmp (cast x to y), (cast/cst). We only handle extending casts so
3357 /// far.
3358 Instruction *InstCombiner::foldICmpWithCastAndCast(ICmpInst &ICmp) {
3359 const CastInst *LHSCI = cast<CastInst>(ICmp.getOperand(0));
3360 Value *LHSCIOp = LHSCI->getOperand(0);
3361 Type *SrcTy = LHSCIOp->getType();
3362 Type *DestTy = LHSCI->getType();
3365 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
3366 // integer type is the same size as the pointer type.
3367 if (LHSCI->getOpcode() == Instruction::PtrToInt &&
3368 DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
3369 Value *RHSOp = nullptr;
3370 if (auto *RHSC = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
3371 Value *RHSCIOp = RHSC->getOperand(0);
3372 if (RHSCIOp->getType()->getPointerAddressSpace() ==
3373 LHSCIOp->getType()->getPointerAddressSpace()) {
3374 RHSOp = RHSC->getOperand(0);
3375 // If the pointer types don't match, insert a bitcast.
3376 if (LHSCIOp->getType() != RHSOp->getType())
3377 RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType());
3379 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
3380 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
3384 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSOp);
3387 // The code below only handles extension cast instructions, so far.
3389 if (LHSCI->getOpcode() != Instruction::ZExt &&
3390 LHSCI->getOpcode() != Instruction::SExt)
3393 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
3394 bool isSignedCmp = ICmp.isSigned();
3396 if (auto *CI = dyn_cast<CastInst>(ICmp.getOperand(1))) {
3397 // Not an extension from the same type?
3398 Value *RHSCIOp = CI->getOperand(0);
3399 if (RHSCIOp->getType() != LHSCIOp->getType())
3402 // If the signedness of the two casts doesn't agree (i.e. one is a sext
3403 // and the other is a zext), then we can't handle this.
3404 if (CI->getOpcode() != LHSCI->getOpcode())
3407 // Deal with equality cases early.
3408 if (ICmp.isEquality())
3409 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp);
3411 // A signed comparison of sign extended values simplifies into a
3412 // signed comparison.
3413 if (isSignedCmp && isSignedExt)
3414 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp);
3416 // The other three cases all fold into an unsigned comparison.
3417 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
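// Illustrative IR sketch of the ext/ext folds above (hypothetical values):
//   %xa = sext i16 %a to i32
//   %xb = sext i16 %b to i32
//   %c  = icmp slt i32 %xa, %xb
// -->
//   %c  = icmp slt i16 %a, %b
// The zext/zext + unsigned-compare case folds the same way.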
3420 // If we aren't dealing with a constant on the RHS, exit early.
3421 auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
3425 // Compute the constant that would happen if we truncated to SrcTy then
3426 // re-extended to DestTy.
3427 Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy);
3428 Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(), Res1, DestTy);
3430 // If the re-extended constant didn't change...
3432 // Deal with equality cases early.
3433 if (ICmp.isEquality())
3434 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1);
3436 // A signed comparison of sign extended values simplifies into a
3437 // signed comparison.
3438 if (isSignedExt && isSignedCmp)
3439 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1);
3441 // The other three cases all fold into an unsigned comparison.
3442 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, Res1);
3445 // The re-extended constant changed, partly changed (in the case of a vector),
3446 // or could not be determined to be equal (in the case of a constant
3447 // expression), so the constant cannot be represented in the shorter type.
3448 // Consequently, we cannot emit a simple comparison.
3449 // All the cases that fold to true or false will have already been handled
3450 // by SimplifyICmpInst, so only deal with the tricky case.
3452 if (isSignedCmp || !isSignedExt || !isa<ConstantInt>(C))
3455 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases
3456 // should have been folded away previously and should not reach here.
3458 // We're performing an unsigned comp with a sign extended value.
3459 // This is true if the input is >= 0. [aka >s -1]
3460 Constant *NegOne = Constant::getAllOnesValue(SrcTy);
3461 Value *Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICmp.getName());
3463 // Finally, return the value computed.
3464 if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
3465 return replaceInstUsesWith(ICmp, Result);
3467 assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
3468 return BinaryOperator::CreateNot(Result);
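// Illustrative IR sketch of this tricky case (hypothetical values):
//   %x = sext i8 %a to i32
//   %c = icmp ult i32 %x, 200    ; 200 is not representable in i8
// -->
//   %c = icmp sgt i8 %a, -1
// since the sign-extended value is u< 200 exactly when the input is >= 0.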
3471 bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
3472 Value *RHS, Instruction &OrigI,
3473 Value *&Result, Constant *&Overflow) {
3474 if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
3475 std::swap(LHS, RHS);
3477 auto SetResult = [&](Value *OpResult, Constant *OverflowVal, bool ReuseName) {
3479 Overflow = OverflowVal;
3481 Result->takeName(&OrigI);
3485 // If the overflow check was an add followed by a compare, the insertion point
3486 // may be pointing to the compare. We want to insert the new instructions
3487 // before the add in case there are uses of the add between the add and the
3488 // compare.
3489 Builder->SetInsertPoint(&OrigI);
3493 llvm_unreachable("bad overflow check kind!");
3495 case OCF_UNSIGNED_ADD: {
3496 OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigI);
3497 if (OR == OverflowResult::NeverOverflows)
3498 return SetResult(Builder->CreateNUWAdd(LHS, RHS), Builder->getFalse(),
3501 if (OR == OverflowResult::AlwaysOverflows)
3502 return SetResult(Builder->CreateAdd(LHS, RHS), Builder->getTrue(), true);
3504 // Fall through uadd into sadd
3507 case OCF_SIGNED_ADD: {
3508 // X + 0 -> {X, false}
3509 if (match(RHS, m_Zero()))
3510 return SetResult(LHS, Builder->getFalse(), false);
3512 // We can strength reduce this signed add into a regular add if we can prove
3513 // that it will never overflow.
3514 if (OCF == OCF_SIGNED_ADD)
3515 if (willNotOverflowSignedAdd(LHS, RHS, OrigI))
3516 return SetResult(Builder->CreateNSWAdd(LHS, RHS), Builder->getFalse(),
3521 case OCF_UNSIGNED_SUB:
3522 case OCF_SIGNED_SUB: {
3523 // X - 0 -> {X, false}
3524 if (match(RHS, m_Zero()))
3525 return SetResult(LHS, Builder->getFalse(), false);
3527 if (OCF == OCF_SIGNED_SUB) {
3528 if (willNotOverflowSignedSub(LHS, RHS, OrigI))
3529 return SetResult(Builder->CreateNSWSub(LHS, RHS), Builder->getFalse(),
3532 if (willNotOverflowUnsignedSub(LHS, RHS, OrigI))
3533 return SetResult(Builder->CreateNUWSub(LHS, RHS), Builder->getFalse(),
3539 case OCF_UNSIGNED_MUL: {
3540 OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, &OrigI);
3541 if (OR == OverflowResult::NeverOverflows)
3542 return SetResult(Builder->CreateNUWMul(LHS, RHS), Builder->getFalse(),
3544 if (OR == OverflowResult::AlwaysOverflows)
3545 return SetResult(Builder->CreateMul(LHS, RHS), Builder->getTrue(), true);
3548 case OCF_SIGNED_MUL:
3549 // X * undef -> undef
3550 if (isa<UndefValue>(RHS))
3551 return SetResult(RHS, UndefValue::get(Builder->getInt1Ty()), false);
3553 // X * 0 -> {0, false}
3554 if (match(RHS, m_Zero()))
3555 return SetResult(RHS, Builder->getFalse(), false);
3557 // X * 1 -> {X, false}
3558 if (match(RHS, m_One()))
3559 return SetResult(LHS, Builder->getFalse(), false);
3561 if (OCF == OCF_SIGNED_MUL)
3562 if (willNotOverflowSignedMul(LHS, RHS, OrigI))
3563 return SetResult(Builder->CreateNSWMul(LHS, RHS), Builder->getFalse(),
3571 /// \brief Recognize and process idiom involving test for multiplication
3572 /// overflow.
3574 /// The caller has matched a pattern of the form:
3575 ///   I = cmp u (mul(zext A, zext B)), V
3576 /// The function checks if this is a test for overflow and if so replaces
3577 /// multiplication with call to 'mul.with.overflow' intrinsic.
3579 /// \param I Compare instruction.
3580 /// \param MulVal Result of the 'mul' instruction. It is one of the arguments of
3581 /// the compare instruction. Must be of integer type.
3582 /// \param OtherVal The other argument of compare instruction.
3583 /// \returns Instruction which must replace the compare instruction, NULL if no
3584 /// replacement required.
3585 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
3586 Value *OtherVal, InstCombiner &IC) {
3587 // Don't bother doing this transformation for pointers, don't do it for
3588 // vectors.
3589 if (!isa<IntegerType>(MulVal->getType()))
3592 assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
3593 assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
3594 auto *MulInstr = dyn_cast<Instruction>(MulVal);
3597 assert(MulInstr->getOpcode() == Instruction::Mul);
3599 auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
3600 *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
3601 assert(LHS->getOpcode() == Instruction::ZExt);
3602 assert(RHS->getOpcode() == Instruction::ZExt);
3603 Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
3605 // Calculate type and width of the result produced by mul.with.overflow.
3606 Type *TyA = A->getType(), *TyB = B->getType();
3607 unsigned WidthA = TyA->getPrimitiveSizeInBits(),
3608 WidthB = TyB->getPrimitiveSizeInBits();
3609 unsigned MulWidth;
3610 Type *MulType;
3611 if (WidthB > WidthA) {
3612 MulWidth = WidthB;
3613 MulType = TyB;
3614 } else {
3615 MulWidth = WidthA;
3616 MulType = TyA;
3617 }
3619 // In order to replace the original mul with a narrower mul.with.overflow,
3620 // all uses must ignore upper bits of the product. The number of used low
3621 // bits must not be greater than the width of mul.with.overflow.
3622 if (MulVal->hasNUsesOrMore(2))
3623 for (User *U : MulVal->users()) {
3626 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
3627 // Check if truncation ignores bits above MulWidth.
3628 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
3629 if (TruncWidth > MulWidth)
3631 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
3632 // Check if AND ignores bits above MulWidth.
3633 if (BO->getOpcode() != Instruction::And)
3635 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
3636 const APInt &CVal = CI->getValue();
3637 if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
3641 // Other uses prohibit this transformation.
3646 // Recognize patterns
3647 switch (I.getPredicate()) {
3648 case ICmpInst::ICMP_EQ:
3649 case ICmpInst::ICMP_NE:
3650 // Recognize pattern:
3651 // mulval = mul(zext A, zext B)
3652 // cmp eq/neq mulval, zext trunc mulval
3653 if (ZExtInst *Zext = dyn_cast<ZExtInst>(OtherVal))
3654 if (Zext->hasOneUse()) {
3655 Value *ZextArg = Zext->getOperand(0);
3656 if (TruncInst *Trunc = dyn_cast<TruncInst>(ZextArg))
3657 if (Trunc->getType()->getPrimitiveSizeInBits() == MulWidth)
3661 // Recognize pattern:
3662 // mulval = mul(zext A, zext B)
3663 // cmp eq/neq mulval, and(mulval, mask), where mask selects the low MulWidth bits.
3666 if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
3667 if (ValToMask != MulVal)
3669 const APInt &CVal = CI->getValue() + 1;
3670 if (CVal.isPowerOf2()) {
3671 unsigned MaskWidth = CVal.logBase2();
3672 if (MaskWidth == MulWidth)
3673 break; // Recognized
3678 case ICmpInst::ICMP_UGT:
3679 // Recognize pattern:
3680 // mulval = mul(zext A, zext B)
3681 // cmp ugt mulval, max
3682 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3683 APInt MaxVal = APInt::getMaxValue(MulWidth);
3684 MaxVal = MaxVal.zext(CI->getBitWidth());
3685 if (MaxVal.eq(CI->getValue()))
3686 break; // Recognized
3690 case ICmpInst::ICMP_UGE:
3691 // Recognize pattern:
3692 // mulval = mul(zext A, zext B)
3693 // cmp uge mulval, max+1
3694 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3695 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
3696 if (MaxVal.eq(CI->getValue()))
3697 break; // Recognized
3701 case ICmpInst::ICMP_ULE:
3702 // Recognize pattern:
3703 // mulval = mul(zext A, zext B)
3704 // cmp ule mulval, max
3705 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3706 APInt MaxVal = APInt::getMaxValue(MulWidth);
3707 MaxVal = MaxVal.zext(CI->getBitWidth());
3708 if (MaxVal.eq(CI->getValue()))
3709 break; // Recognized
3713 case ICmpInst::ICMP_ULT:
3714 // Recognize pattern:
3715 // mulval = mul(zext A, zext B)
3716 // cmp ult mulval, max + 1
3717 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3718 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
3719 if (MaxVal.eq(CI->getValue()))
3720 break; // Recognized
3728 InstCombiner::BuilderTy *Builder = IC.Builder;
3729 Builder->SetInsertPoint(MulInstr);
3731 // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
3732 Value *MulA = A, *MulB = B;
3733 if (WidthA < MulWidth)
3734 MulA = Builder->CreateZExt(A, MulType);
3735 if (WidthB < MulWidth)
3736 MulB = Builder->CreateZExt(B, MulType);
3737 Value *F = Intrinsic::getDeclaration(I.getModule(),
3738 Intrinsic::umul_with_overflow, MulType);
3739 CallInst *Call = Builder->CreateCall(F, {MulA, MulB}, "umul");
3740 IC.Worklist.Add(MulInstr);
3742 // If there are uses of mul result other than the comparison, we know that
3743 // they are truncation or binary AND. Change them to use result of
3744 // mul.with.overflow and properly adjust the mask/size.
3745 if (MulVal->hasNUsesOrMore(2)) {
3746 Value *Mul = Builder->CreateExtractValue(Call, 0, "umul.value");
3747 for (User *U : MulVal->users()) {
3748 if (U == &I || U == OtherVal)
3750 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
3751 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
3752 IC.replaceInstUsesWith(*TI, Mul);
3754 TI->setOperand(0, Mul);
3755 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
3756 assert(BO->getOpcode() == Instruction::And);
3757 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
3758 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
3759 APInt ShortMask = CI->getValue().trunc(MulWidth);
3760 Value *ShortAnd = Builder->CreateAnd(Mul, ShortMask);
3762 cast<Instruction>(Builder->CreateZExt(ShortAnd, BO->getType()));
3763 IC.Worklist.Add(Zext);
3764 IC.replaceInstUsesWith(*BO, Zext);
3766 llvm_unreachable("Unexpected Binary operation");
3768 IC.Worklist.Add(cast<Instruction>(U));
3771 if (isa<Instruction>(OtherVal))
3772 IC.Worklist.Add(cast<Instruction>(OtherVal));
3774 // The original icmp gets replaced with the overflow value, maybe inverted
3775 // depending on predicate.
3776 bool Inverse = false;
3777 switch (I.getPredicate()) {
3778 case ICmpInst::ICMP_NE:
3780 case ICmpInst::ICMP_EQ:
3783 case ICmpInst::ICMP_UGT:
3784 case ICmpInst::ICMP_UGE:
3785 if (I.getOperand(0) == MulVal)
3789 case ICmpInst::ICMP_ULT:
3790 case ICmpInst::ICMP_ULE:
3791 if (I.getOperand(1) == MulVal)
3796 llvm_unreachable("Unexpected predicate");
3799 Value *Res = Builder->CreateExtractValue(Call, 1);
3800 return BinaryOperator::CreateNot(Res);
3803 return ExtractValueInst::Create(Call, 1);
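// Illustrative IR sketch of the idiom this handles (hypothetical values):
//   %za = zext i8 %a to i32
//   %zb = zext i8 %b to i32
//   %m  = mul i32 %za, %zb
//   %c  = icmp ugt i32 %m, 255
// -->
//   %umul = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %a, i8 %b)
//   %c    = extractvalue { i8, i1 } %umul, 1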
3806 /// When performing a comparison against a constant, it is possible that not
3807 /// all the bits in the LHS are demanded. This helper method computes the mask
3808 /// that is demanded.
3809 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth,
3810 bool isSignCheck) {
3811 if (isSignCheck)
3812 return APInt::getSignMask(BitWidth);
3814 ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1));
3815 if (!CI) return APInt::getAllOnesValue(BitWidth);
3816 const APInt &RHS = CI->getValue();
3818 switch (I.getPredicate()) {
3819 // For a UGT comparison, we don't care about any bits that
3820 // correspond to the trailing ones of the comparand. The value of these
3821 // bits doesn't impact the outcome of the comparison, because any value
3822 // greater than the RHS must differ in a bit higher than these due to carry.
3823 case ICmpInst::ICMP_UGT: {
3824 unsigned trailingOnes = RHS.countTrailingOnes();
3825 return APInt::getBitsSetFrom(BitWidth, trailingOnes);
3828 // Similarly, for a ULT comparison, we don't care about the trailing zeros.
3829 // Any value less than the RHS must differ in a higher bit because of carries.
3830 case ICmpInst::ICMP_ULT: {
3831 unsigned trailingZeros = RHS.countTrailingZeros();
3832 return APInt::getBitsSetFrom(BitWidth, trailingZeros);
3836 return APInt::getAllOnesValue(BitWidth);
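// For example (illustrative): for "icmp ugt i8 %x, 7" the three trailing ones
// of 7 are not demanded, so the returned mask is 0xF8; any %x u> 7 must differ
// from 7 at or above bit 3.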
3840 /// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst
3841 /// should be swapped.
3842 /// The decision is based on how many times these two operands are reused
3843 /// as subtract operands and their positions in those instructions.
3844 /// The rationale is that several architectures use the same instruction for
3845 /// both subtract and cmp, so it is better if the order of those operands
3846 /// matches.
3847 /// \return true if Op0 and Op1 should be swapped.
3848 static bool swapMayExposeCSEOpportunities(const Value * Op0,
3849 const Value * Op1) {
3850 // Filter out pointer values, as those cannot appear directly in a subtract.
3851 // FIXME: we may want to go through inttoptrs or bitcasts.
3852 if (Op0->getType()->isPointerTy())
3854 // Count every use of both Op0 and Op1 in a subtract.
3855 // Each time Op0 is the first operand, count -1: swapping is bad, the
3856 // subtract has already the same layout as the compare.
3857 // Each time Op0 is the second operand, count +1: swapping is good, the
3858 // subtract has a different layout than the compare.
3859 // At the end, if the benefit is greater than 0, Op0 should come second to
3860 // expose more CSE opportunities.
3861 int GlobalSwapBenefits = 0;
3862 for (const User *U : Op0->users()) {
3863 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(U);
3864 if (!BinOp || BinOp->getOpcode() != Instruction::Sub)
3866 // If Op0 is the first argument, it is not beneficial to swap the
3867 // arguments.
3868 int LocalSwapBenefits = -1;
3869 unsigned Op1Idx = 1;
3870 if (BinOp->getOperand(Op1Idx) == Op0) {
3872 LocalSwapBenefits = 1;
3874 if (BinOp->getOperand(Op1Idx) != Op1)
3876 GlobalSwapBenefits += LocalSwapBenefits;
3878 return GlobalSwapBenefits > 0;
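// For example (illustrative): if "%d = sub i32 %b, %a" exists elsewhere, then
// "icmp eq i32 %a, %b" is better emitted as "icmp eq i32 %b, %a" so that
// targets which implement cmp as a subtract can reuse %d.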
3881 /// \brief Check that one use is in the same block as the definition and all
3882 /// other uses are in blocks dominated by a given block.
3884 /// \param DI Definition
3886 /// \param DB Block that must dominate all uses of \p DI outside
3887 /// the parent block
3888 /// \return true when \p UI is the only use of \p DI in the parent block
3889 /// and all other uses of \p DI are in blocks dominated by \p DB.
3891 bool InstCombiner::dominatesAllUses(const Instruction *DI,
3892 const Instruction *UI,
3893 const BasicBlock *DB) const {
3894 assert(DI && UI && "Instruction not defined\n");
3895 // Ignore incomplete definitions.
3896 if (!DI->getParent())
3898 // DI and UI must be in the same block.
3899 if (DI->getParent() != UI->getParent())
3901 // Protect from self-referencing blocks.
3902 if (DI->getParent() == DB)
3904 for (const User *U : DI->users()) {
3905 auto *Usr = cast<Instruction>(U);
3906 if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
3912 /// Return true when the instruction sequence within a block is select-cmp-br.
3913 static bool isChainSelectCmpBranch(const SelectInst *SI) {
3914 const BasicBlock *BB = SI->getParent();
3917 auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
3918 if (!BI || BI->getNumSuccessors() != 2)
3920 auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
3921 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
3926 /// \brief True when a select result is replaced by one of its operands
3927 /// in a select-icmp sequence. This will eventually result in the elimination
3928 /// of the select.
3930 /// \param SI Select instruction
3931 /// \param Icmp Compare instruction
3932 /// \param SIOpd Operand that replaces the select
3935 /// - The replacement is global and requires dominator information
3936 /// - The caller is responsible for the actual replacement
3941 /// %4 = select i1 %3, %C* %0, %C* null
3942 /// %5 = icmp eq %C* %4, null
3943 /// br i1 %5, label %9, label %7
3945 /// ; <label>:7 ; preds = %entry
3946 /// %8 = getelementptr inbounds %C* %4, i64 0, i32 0
3949 /// can be transformed to
3951 /// %5 = icmp eq %C* %0, null
3952 /// %6 = select i1 %3, i1 %5, i1 true
3953 /// br i1 %6, label %9, label %7
3955 /// ; <label>:7 ; preds = %entry
3956 /// %8 = getelementptr inbounds %C* %0, i64 0, i32 0 // replace by %0!
3958 /// The same applies when the first operand of the select is a constant and/or
3959 /// the compare is for not-equal rather than equal.
3961 /// NOTE: The function is only called when the select and compare constants
3962 /// are equal; the optimization can work only for EQ predicates. This is not a
3963 /// major restriction since a NE compare should be 'normalized' to an equal
3964 /// compare, which usually happens in the combiner; the test case
3965 /// select-cmp-br.ll checks for it.
3966 bool InstCombiner::replacedSelectWithOperand(SelectInst *SI,
3967 const ICmpInst *Icmp,
3968 const unsigned SIOpd) {
3969 assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
3970 if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
3971 BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
3972 // The check for the single predecessor is not the best that can be
3973 // done. But it protects efficiently against cases like when SI's
3974 // home block has two successors, Succ and Succ1, and Succ1 is a
3975 // predecessor of Succ. Then SI can't be replaced by SIOpd because the use
3976 // that gets replaced can be reached on either path. So the uniqueness
3977 // check guarantees that the path all uses of SI (outside SI's parent)
3978 // are on is disjoint from all other paths out of SI. But that information
3979 // is more expensive to compute, and the trade-off here is in favor
3980 // of compile time. It should also be noted that we check for a single
3981 // predecessor and not just uniqueness. This is to handle the situation
3982 // where Succ and Succ1 point to the same basic block.
3983 if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
3985 SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
3992 /// Try to fold the comparison based on range information we can get by checking
3993 /// whether bits are known to be zero or one in the inputs.
3994 Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
3995 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3996 Type *Ty = Op0->getType();
3997 ICmpInst::Predicate Pred = I.getPredicate();
3999 // Get scalar or pointer size.
4000 unsigned BitWidth = Ty->isIntOrIntVectorTy()
4001 ? Ty->getScalarSizeInBits()
4002 : DL.getTypeSizeInBits(Ty->getScalarType());
4007 // If this is a normal comparison, it demands all bits. If it is a sign bit
4008 // comparison, it only demands the sign bit.
4009 bool IsSignBit = false;
4011 if (match(Op1, m_APInt(CmpC))) {
4013 IsSignBit = isSignBitCheck(Pred, *CmpC, UnusedBit);
4016 KnownBits Op0Known(BitWidth);
4017 KnownBits Op1Known(BitWidth);
4019 if (SimplifyDemandedBits(&I, 0,
4020 getDemandedBitsLHSMask(I, BitWidth, IsSignBit),
4024 if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth),
4028 // Given the known and unknown bits, compute a range that the LHS could be
4029 // in. Compute the Min, Max and RHS values based on the known bits. For the
4030 // EQ and NE we use unsigned values.
4031 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
4032 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
4034 computeSignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
4035 computeSignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
4037 computeUnsignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
4038 computeUnsignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
4041 // If Min and Max are known to be the same, then SimplifyDemandedBits
4042 // figured out that the LHS is a constant. Constant fold this now, so that
4043 // code below can assume that Min != Max.
4044 if (!isa<Constant>(Op0) && Op0Min == Op0Max)
4045 return new ICmpInst(Pred, ConstantInt::get(Op0->getType(), Op0Min), Op1);
4046 if (!isa<Constant>(Op1) && Op1Min == Op1Max)
4047 return new ICmpInst(Pred, Op0, ConstantInt::get(Op1->getType(), Op1Min));
4049 // Based on the range information we know about the LHS, see if we can
4050 // simplify this comparison. For example, (x&4) < 8 is always true.
4053 llvm_unreachable("Unknown icmp opcode!");
4054 case ICmpInst::ICMP_EQ:
4055 case ICmpInst::ICMP_NE: {
4056 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) {
4057 return Pred == CmpInst::ICMP_EQ
4058 ? replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()))
4059 : replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4062 // If all bits are known zero except for one, then we know at most one bit
4063 // is set. If the comparison is against zero, then this is a check to see if
4064 // *that* bit is set.
4065 APInt Op0KnownZeroInverted = ~Op0Known.Zero;
4066 if (Op1Known.isZero()) {
4067 // If the LHS is an AND with the same constant, look through it.
4068 Value *LHS = nullptr;
4070 if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
4071 *LHSC != Op0KnownZeroInverted)
4075 if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
4076 APInt ValToCheck = Op0KnownZeroInverted;
4077 Type *XTy = X->getType();
4078 if (ValToCheck.isPowerOf2()) {
4079 // ((1 << X) & 8) == 0 -> X != 3
4080 // ((1 << X) & 8) != 0 -> X == 3
4081 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
4082 auto NewPred = ICmpInst::getInversePredicate(Pred);
4083 return new ICmpInst(NewPred, X, CmpC);
4084 } else if ((++ValToCheck).isPowerOf2()) {
4085 // ((1 << X) & 7) == 0 -> X >= 3
4086 // ((1 << X) & 7) != 0 -> X < 3
4087 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
4089 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
4090 return new ICmpInst(NewPred, X, CmpC);
4094 // Check if the LHS is 8 >>u x and the result is a power of 2 like 1.
4096 if (Op0KnownZeroInverted == 1 &&
4097 match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
4098 // ((8 >>u X) & 1) == 0 -> X != 3
4099 // ((8 >>u X) & 1) != 0 -> X == 3
4100 unsigned CmpVal = CI->countTrailingZeros();
4101 auto NewPred = ICmpInst::getInversePredicate(Pred);
4102 return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
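// Illustrative IR sketch of the shift-mask folds above (hypothetical values):
//   %s = shl i32 1, %x
//   %a = and i32 %s, 8
//   %c = icmp eq i32 %a, 0
// -->
//   %c = icmp ne i32 %x, 3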
4107 case ICmpInst::ICMP_ULT: {
4108 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
4109 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4110 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
4111 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4112 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
4113 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4116 if (match(Op1, m_APInt(CmpC))) {
4117 // A <u C -> A == C-1 if min(A)+1 == C
4118 if (Op1Max == Op0Min + 1) {
4119 Constant *CMinus1 = ConstantInt::get(Op0->getType(), *CmpC - 1);
4120 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, CMinus1);
4125 case ICmpInst::ICMP_UGT: {
4126 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
4127 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4129 if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
4130 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4132 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
4133 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4136 if (match(Op1, m_APInt(CmpC))) {
4137 // A >u C -> A == C+1 if max(A)-1 == C
4138 if (*CmpC == Op0Max - 1)
4139 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4140 ConstantInt::get(Op1->getType(), *CmpC + 1));
4144 case ICmpInst::ICMP_SLT:
4145 if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
4146 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4147 if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
4148 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4149 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
4150 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4151 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
4152 if (Op1Max == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
4153 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4154 Builder->getInt(CI->getValue() - 1));
4157 case ICmpInst::ICMP_SGT:
4158 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
4159 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4160 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
4161 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4163 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
4164 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4165 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
4166 if (Op1Min == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
4167 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4168 Builder->getInt(CI->getValue() + 1));
4171 case ICmpInst::ICMP_SGE:
4172 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
4173 if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
4174 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4175 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
4176 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4178 case ICmpInst::ICMP_SLE:
4179 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
4180 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
4181 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4182 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
4183 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4185 case ICmpInst::ICMP_UGE:
4186 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
4187 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
4188 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4189 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
4190 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4192 case ICmpInst::ICMP_ULE:
4193 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
4194 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
4195 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4196 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
4197 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4201 // Turn a signed comparison into an unsigned one if both operands are known to
4202 // have the same sign.
4204 ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
4205 (Op0Known.One.isNegative() && Op1Known.One.isNegative())))
4206 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
4211 /// If we have an icmp le or icmp ge instruction with a constant operand, turn
4212 /// it into the appropriate icmp lt or icmp gt instruction. This transform
4213 /// allows them to be folded in visitICmpInst.
4214 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
4215 ICmpInst::Predicate Pred = I.getPredicate();
4216 if (Pred != ICmpInst::ICMP_SLE && Pred != ICmpInst::ICMP_SGE &&
4217 Pred != ICmpInst::ICMP_ULE && Pred != ICmpInst::ICMP_UGE)
4220 Value *Op0 = I.getOperand(0);
4221 Value *Op1 = I.getOperand(1);
4222 auto *Op1C = dyn_cast<Constant>(Op1);
4226 // Check if the constant operand can be safely incremented/decremented without
4227 // overflowing/underflowing. For scalars, SimplifyICmpInst has already handled
4228 // the edge cases for us, so we just assert on them. For vectors, we must
4229 // handle the edge cases.
4230 Type *Op1Type = Op1->getType();
4231 bool IsSigned = I.isSigned();
4232 bool IsLE = (Pred == ICmpInst::ICMP_SLE || Pred == ICmpInst::ICMP_ULE);
4233 auto *CI = dyn_cast<ConstantInt>(Op1C);
4235 // A <= MAX -> TRUE ; A >= MIN -> TRUE
4236 assert(IsLE ? !CI->isMaxValue(IsSigned) : !CI->isMinValue(IsSigned));
4237 } else if (Op1Type->isVectorTy()) {
4238 // TODO? If the edge cases for vectors were guaranteed to be handled as they
4239 // are for scalar, we could remove the min/max checks. However, to do that,
4240 // we would have to use insertelement/shufflevector to replace edge values.
4241 unsigned NumElts = Op1Type->getVectorNumElements();
4242 for (unsigned i = 0; i != NumElts; ++i) {
4243 Constant *Elt = Op1C->getAggregateElement(i);
4247 if (isa<UndefValue>(Elt))
4250 // Bail out if we can't determine if this constant is min/max or if we
4251 // know that this constant is min/max.
4252 auto *CI = dyn_cast<ConstantInt>(Elt);
4253 if (!CI || (IsLE ? CI->isMaxValue(IsSigned) : CI->isMinValue(IsSigned)))
4261 // Increment or decrement the constant and set the new comparison predicate:
4262 // ULE -> ULT ; UGE -> UGT ; SLE -> SLT ; SGE -> SGT
4263 Constant *OneOrNegOne = ConstantInt::get(Op1Type, IsLE ? 1 : -1, true);
4264 CmpInst::Predicate NewPred = IsLE ? ICmpInst::ICMP_ULT: ICmpInst::ICMP_UGT;
4265 NewPred = IsSigned ? ICmpInst::getSignedPredicate(NewPred) : NewPred;
4266 return new ICmpInst(NewPred, Op0, ConstantExpr::getAdd(Op1C, OneOrNegOne));
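// For example (illustrative):
//   icmp sle i32 %x, 9  -->  icmp slt i32 %x, 10
//   icmp uge i32 %x, 5  -->  icmp ugt i32 %x, 4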
4269 /// Integer compare with boolean values can always be turned into bitwise ops.
4270 static Instruction *canonicalizeICmpBool(ICmpInst &I,
4271 InstCombiner::BuilderTy &Builder) {
4272 Value *A = I.getOperand(0), *B = I.getOperand(1);
4273 assert(A->getType()->getScalarType()->isIntegerTy(1) && "Bools only");
4275 // A boolean compared to true/false can be simplified to Op0/true/false in
4276 // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
4277 // Cases not handled by InstSimplify are always 'not' of Op0.
4278 if (match(B, m_Zero())) {
4279 switch (I.getPredicate()) {
4280 case CmpInst::ICMP_EQ: // A == 0 -> !A
4281 case CmpInst::ICMP_ULE: // A <=u 0 -> !A
4282 case CmpInst::ICMP_SGE: // A >=s 0 -> !A
4283 return BinaryOperator::CreateNot(A);
4285 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
4287 } else if (match(B, m_One())) {
4288 switch (I.getPredicate()) {
4289 case CmpInst::ICMP_NE: // A != 1 -> !A
4290 case CmpInst::ICMP_ULT: // A <u 1 -> !A
4291 case CmpInst::ICMP_SGT: // A >s -1 -> !A
4292 return BinaryOperator::CreateNot(A);
4294 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
4298 switch (I.getPredicate()) {
4300 llvm_unreachable("Invalid icmp instruction!");
4301 case ICmpInst::ICMP_EQ:
4302 // icmp eq i1 A, B -> ~(A ^ B)
4303 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
4305 case ICmpInst::ICMP_NE:
4306 // icmp ne i1 A, B -> A ^ B
4307 return BinaryOperator::CreateXor(A, B);
4309 case ICmpInst::ICMP_UGT:
4310 // icmp ugt -> icmp ult
4313 case ICmpInst::ICMP_ULT:
4314 // icmp ult i1 A, B -> ~A & B
4315 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
4317 case ICmpInst::ICMP_SGT:
4318 // icmp sgt -> icmp slt
4321 case ICmpInst::ICMP_SLT:
4322 // icmp slt i1 A, B -> A & ~B
4323 return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
4325 case ICmpInst::ICMP_UGE:
4326 // icmp uge -> icmp ule
4329 case ICmpInst::ICMP_ULE:
4330 // icmp ule i1 A, B -> ~A | B
4331 return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
4333 case ICmpInst::ICMP_SGE:
4334 // icmp sge -> icmp sle
4337 case ICmpInst::ICMP_SLE:
4338 // icmp sle i1 A, B -> A | ~B
4339 return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
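// For example (illustrative):
//   %c = icmp ne i1 %a, %b   -->  %c = xor i1 %a, %b
//   %c = icmp ult i1 %a, %b  -->  %n = xor i1 %a, true
//                                 %c = and i1 %n, %b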
4343 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
4344 bool Changed = false;
4345 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4346 unsigned Op0Cplxity = getComplexity(Op0);
4347 unsigned Op1Cplxity = getComplexity(Op1);
4349 /// Orders the operands of the compare so that they are listed from most
4350 /// complex to least complex. This puts constants after unary operators,
4351 /// which in turn come after binary operators.
4352 if (Op0Cplxity < Op1Cplxity ||
4353 (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) {
4355 std::swap(Op0, Op1);
4359 if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1,
4360 SQ.getWithInstruction(&I)))
4361 return replaceInstUsesWith(I, V);
4363 // Comparing -val or val with non-zero is the same as just comparing val,
4364 // i.e., abs(val) != 0 -> val != 0
4365 if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
4366 Value *Cond, *SelectTrue, *SelectFalse;
4367 if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
4368 m_Value(SelectFalse)))) {
4369 if (Value *V = dyn_castNegVal(SelectTrue)) {
4370 if (V == SelectFalse)
4371 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
4373 else if (Value *V = dyn_castNegVal(SelectFalse)) {
4374 if (V == SelectTrue)
4375 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
4380 if (Op0->getType()->getScalarType()->isIntegerTy(1))
4381 if (Instruction *Res = canonicalizeICmpBool(I, *Builder))
4384 if (ICmpInst *NewICmp = canonicalizeCmpWithConstant(I))
4387 if (Instruction *Res = foldICmpWithConstant(I))
4390 if (Instruction *Res = foldICmpUsingKnownBits(I))
4393 // Test if the ICmpInst instruction is used exclusively by a select as
4394 // part of a minimum or maximum operation. If so, refrain from doing
4395 // any other folding. This helps out other analyses which understand
4396 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
4397 // and CodeGen. And in this case, at least one of the comparison
4398 // operands has at least one user besides the compare (the select),
4399 // which would often largely negate the benefit of folding anyway.
4401 if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
4402 if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
4403 (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
4406 // FIXME: We only do this after checking for min/max to prevent infinite
4407 // looping caused by a reverse canonicalization of these patterns for min/max.
4408 // FIXME: The organization of folds is a mess. These would naturally go into
4409 // canonicalizeCmpWithConstant(), but we can't move all of the above folds
4410 // down here after the min/max restriction.
4411 ICmpInst::Predicate Pred = I.getPredicate();
4413 if (match(Op1, m_APInt(C))) {
4414 // For i32: x >u 2147483647 -> x <s 0 -> true if sign bit set
4415 if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
4416 Constant *Zero = Constant::getNullValue(Op0->getType());
4417 return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
4420 // For i32: x <u 2147483648 -> x >s -1 -> true if sign bit clear
4421 if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
4422 Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
4423 return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
4427 if (Instruction *Res = foldICmpInstWithConstant(I))
4430 if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
4433 // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
4434 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
4435 if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
4437 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
4438 if (Instruction *NI = foldGEPICmp(GEP, Op0,
4439 ICmpInst::getSwappedPredicate(I.getPredicate()), I))
4442 // Try to optimize equality comparisons against alloca-based pointers.
4443 if (Op0->getType()->isPointerTy() && I.isEquality()) {
4444 assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
4445 if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op0, DL)))
4446 if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
4448 if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op1, DL)))
4449 if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
4453 // Test to see if the operands of the icmp are casted versions of other
4454 // values. If the ptr->ptr cast can be stripped off both arguments, we do so
4455 // now.
4456 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
4457 if (Op0->getType()->isPointerTy() &&
4458 (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
4459 // We keep moving the cast from the left operand over to the right
4460 // operand, where it can often be eliminated completely.
4461 Op0 = CI->getOperand(0);
4463 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
4464 // so eliminate it as well.
4465 if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
4466 Op1 = CI2->getOperand(0);
4468 // If Op1 is a constant, we can fold the cast into the constant.
4469 if (Op0->getType() != Op1->getType()) {
4470 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
4471 Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
4473 // Otherwise, cast the RHS right before the icmp
4474 Op1 = Builder->CreateBitCast(Op1, Op0->getType());
4477 return new ICmpInst(I.getPredicate(), Op0, Op1);
4481 if (isa<CastInst>(Op0)) {
4482 // Handle the special case of: icmp (cast bool to X), <cst>
4483 // This comes up when you have code like
4484 //   int X = A < B;
4485 //   if (X) ...
4486 // For generality, we handle any zero-extension of any operand comparison
4487 // with a constant or another cast from the same type.
4488 if (isa<Constant>(Op1) || isa<CastInst>(Op1))
4489 if (Instruction *R = foldICmpWithCastAndCast(I))
4493 if (Instruction *Res = foldICmpBinOp(I))
4496 if (Instruction *Res = foldICmpWithMinMax(I))
4501 // Transform (A & ~B) == 0 --> (A & B) != 0
4502 // and (A & ~B) != 0 --> (A & B) == 0
4503 // if A is a power of 2.
4504 if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
4505 match(Op1, m_Zero()) &&
4506 isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
4507 return new ICmpInst(I.getInversePredicate(),
4508 Builder->CreateAnd(A, B),
4511 // ~x < ~y --> y < x
4512 // ~x < cst --> ~cst < x
4513 if (match(Op0, m_Not(m_Value(A)))) {
4514 if (match(Op1, m_Not(m_Value(B))))
4515 return new ICmpInst(I.getPredicate(), B, A);
4516 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1))
4517 return new ICmpInst(I.getPredicate(), ConstantExpr::getNot(RHSC), A);
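// Illustrative IR sketch of the 'not' fold above (hypothetical values):
//   %nx = xor i32 %x, -1
//   %ny = xor i32 %y, -1
//   %c  = icmp slt i32 %nx, %ny
// -->
//   %c  = icmp slt i32 %y, %x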
4520 Instruction *AddI = nullptr;
4521 if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
4522 m_Instruction(AddI))) &&
4523 isa<IntegerType>(A->getType())) {
4524 Value *Result;
4525 Constant *Overflow;
4526 if (OptimizeOverflowCheck(OCF_UNSIGNED_ADD, A, B, *AddI, Result,
4527 Overflow)) {
4528 replaceInstUsesWith(*AddI, Result);
4529 return replaceInstUsesWith(I, Overflow);
4533 // (zext a) * (zext b) --> llvm.umul.with.overflow.
4534 if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
4535 if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
4538 if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
4539 if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
4544 if (Instruction *Res = foldICmpEquality(I))
4547 // The 'cmpxchg' instruction returns an aggregate containing the old value and
4548 // an i1 which indicates whether or not we successfully did the swap.
4550 // Replace comparisons between the old value and the expected value with the
4551 // indicator that 'cmpxchg' returns.
4553 // N.B. This transform is only valid when the 'cmpxchg' is not permitted to
4554 // spuriously fail. In those cases, the old value may equal the expected
4555 // value but it is possible for the swap to not occur.
4556 if (I.getPredicate() == ICmpInst::ICMP_EQ)
4557 if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
4558 if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
4559 if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
4561 return ExtractValueInst::Create(ACXI, 1);
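// Illustrative IR sketch (hypothetical values; strong cmpxchg only):
//   %pair = cmpxchg i32* %p, i32 %old, i32 %new seq_cst seq_cst
//   %v    = extractvalue { i32, i1 } %pair, 0
//   %c    = icmp eq i32 %v, %old
// -->
//   %c    = extractvalue { i32, i1 } %pair, 1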
4564 Value *X; ConstantInt *Cst;
4566 if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
4567 return foldICmpAddOpConst(I, X, Cst, I.getPredicate());
4570 if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
4571 return foldICmpAddOpConst(I, X, Cst, I.getSwappedPredicate());
4573 return Changed ? &I : nullptr;
4576 /// Fold fcmp ([us]itofp x, cst) if possible.
4577 Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
4579 if (!isa<ConstantFP>(RHSC)) return nullptr;
4580 const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
4582 // Get the width of the mantissa. We don't want to hack on conversions that
4583 // might lose information from the integer, e.g. "i64 -> float"
4584 int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
4585 if (MantissaWidth == -1) return nullptr; // Unknown.
4587 IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
4589 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
4591 if (I.isEquality()) {
4592 FCmpInst::Predicate P = I.getPredicate();
4593 bool IsExact = false;
4594 APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
4595 RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
4597 // If the floating point constant isn't an integer value, we know if we will
4598 // ever compare equal / not equal to it.
4600 // TODO: Can never be -0.0 and other non-representable values
4601 APFloat RHSRoundInt(RHS);
4602 RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
4603 if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
4604 if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
4605 return replaceInstUsesWith(I, Builder->getFalse());
4607 assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
4608 return replaceInstUsesWith(I, Builder->getTrue());
4612 // TODO: If the constant is exactly representable, is it always OK to do
4613 // equality compares as integer?
4616 // Check to see that the input is converted from an integer type that is small
4617 // enough that it preserves all bits. TODO: check here for "known" sign bits.
4618 // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64, e.g.
4619 unsigned InputSize = IntTy->getScalarSizeInBits();
4621 // Following test does NOT adjust InputSize downwards for signed inputs,
4622 // because the most negative value still requires all the mantissa bits
4623 // to distinguish it from one less than that value.
4624 if ((int)InputSize > MantissaWidth) {
4625 // Conversion would lose accuracy. Check if loss can impact comparison.
4626 int Exp = ilogb(RHS);
4627 if (Exp == APFloat::IEK_Inf) {
4628 int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
4629 if (MaxExponent < (int)InputSize - !LHSUnsigned)
4630 // Conversion could create infinity.
4633 // Note that if RHS is zero or NaN, then Exp is negative
4634 // and the first condition is trivially false.
4635 if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
4636 // Conversion could affect comparison.
4641 // Otherwise, we can potentially simplify the comparison. We know that it
4642 // will always come through as an integer value and we know the constant is
4643 // not a NAN (it would have been previously simplified).
4644 assert(!RHS.isNaN() && "NaN comparison not already folded!");
4646 ICmpInst::Predicate Pred;
4647 switch (I.getPredicate()) {
4648 default: llvm_unreachable("Unexpected predicate!");
4649 case FCmpInst::FCMP_UEQ:
4650 case FCmpInst::FCMP_OEQ:
4651 Pred = ICmpInst::ICMP_EQ;
4653 case FCmpInst::FCMP_UGT:
4654 case FCmpInst::FCMP_OGT:
4655 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
4657 case FCmpInst::FCMP_UGE:
4658 case FCmpInst::FCMP_OGE:
4659 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
4661 case FCmpInst::FCMP_ULT:
4662 case FCmpInst::FCMP_OLT:
4663 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
4665 case FCmpInst::FCMP_ULE:
4666 case FCmpInst::FCMP_OLE:
4667 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
4669 case FCmpInst::FCMP_UNE:
4670 case FCmpInst::FCMP_ONE:
4671 Pred = ICmpInst::ICMP_NE;
4673 case FCmpInst::FCMP_ORD:
4674 return replaceInstUsesWith(I, Builder->getTrue());
4675 case FCmpInst::FCMP_UNO:
4676 return replaceInstUsesWith(I, Builder->getFalse());
4679 // Now we know that the APFloat is a normal number, zero or inf.
4681 // See if the FP constant is too large for the integer. For example,
4682 // comparing an i8 to 300.0.
4683 unsigned IntWidth = IntTy->getScalarSizeInBits();
4686 // If the RHS value is > SignedMax, fold the comparison. This handles +INF
4687 // and large values.
4688 APFloat SMax(RHS.getSemantics());
4689 SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
4690 APFloat::rmNearestTiesToEven);
4691 if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
4692 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
4693 Pred == ICmpInst::ICMP_SLE)
4694 return replaceInstUsesWith(I, Builder->getTrue());
4695 return replaceInstUsesWith(I, Builder->getFalse());
4698 // If the RHS value is > UnsignedMax, fold the comparison. This handles
4699 // +INF and large values.
4700 APFloat UMax(RHS.getSemantics());
4701 UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
4702 APFloat::rmNearestTiesToEven);
4703 if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
4704 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
4705 Pred == ICmpInst::ICMP_ULE)
4706 return replaceInstUsesWith(I, Builder->getTrue());
4707 return replaceInstUsesWith(I, Builder->getFalse());
4712 // See if the RHS value is < SignedMin.
4713 APFloat SMin(RHS.getSemantics());
4714 SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
4715 APFloat::rmNearestTiesToEven);
4716 if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
4717 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
4718 Pred == ICmpInst::ICMP_SGE)
4719 return replaceInstUsesWith(I, Builder->getTrue());
4720 return replaceInstUsesWith(I, Builder->getFalse());
4723 // See if the RHS value is < UnsignedMin.
4724 APFloat UMin(RHS.getSemantics());
4725 UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
4726 APFloat::rmNearestTiesToEven);
4727 if (UMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
4728 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
4729 Pred == ICmpInst::ICMP_UGE)
4730 return replaceInstUsesWith(I, Builder->getTrue());
4731 return replaceInstUsesWith(I, Builder->getFalse());
4735 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
4736 // [0, UMAX], but it may still be fractional. See if it is fractional by
4737 // casting the FP value to the integer value and back, checking for equality.
4738 // Don't do this for zero, because -0.0 is not fractional.
4739 Constant *RHSInt = LHSUnsigned
4740 ? ConstantExpr::getFPToUI(RHSC, IntTy)
4741 : ConstantExpr::getFPToSI(RHSC, IntTy);
4742 if (!RHS.isZero()) {
4743 bool Equal = LHSUnsigned
4744 ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
4745 : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
4747 // If we had a comparison against a fractional value, we have to adjust
4748 // the compare predicate and sometimes the value. RHSC is rounded towards
4749 // zero at this point.
4751 default: llvm_unreachable("Unexpected integer comparison!");
4752 case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
4753 return replaceInstUsesWith(I, Builder->getTrue());
4754 case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
4755 return replaceInstUsesWith(I, Builder->getFalse());
4756 case ICmpInst::ICMP_ULE:
4757 // (float)int <= 4.4 --> int <= 4
4758 // (float)int <= -4.4 --> false
4759 if (RHS.isNegative())
4760 return replaceInstUsesWith(I, Builder->getFalse());
4762 case ICmpInst::ICMP_SLE:
4763 // (float)int <= 4.4 --> int <= 4
4764 // (float)int <= -4.4 --> int < -4
4765 if (RHS.isNegative())
4766 Pred = ICmpInst::ICMP_SLT;
4768 case ICmpInst::ICMP_ULT:
4769 // (float)int < -4.4 --> false
4770 // (float)int < 4.4 --> int <= 4
4771 if (RHS.isNegative())
4772 return replaceInstUsesWith(I, Builder->getFalse());
4773 Pred = ICmpInst::ICMP_ULE;
4775 case ICmpInst::ICMP_SLT:
4776 // (float)int < -4.4 --> int < -4
4777 // (float)int < 4.4 --> int <= 4
4778 if (!RHS.isNegative())
4779 Pred = ICmpInst::ICMP_SLE;
4781 case ICmpInst::ICMP_UGT:
4782 // (float)int > 4.4 --> int > 4
4783 // (float)int > -4.4 --> true
4784 if (RHS.isNegative())
4785 return replaceInstUsesWith(I, Builder->getTrue());
4787 case ICmpInst::ICMP_SGT:
4788 // (float)int > 4.4 --> int > 4
4789 // (float)int > -4.4 --> int >= -4
4790 if (RHS.isNegative())
4791 Pred = ICmpInst::ICMP_SGE;
4793 case ICmpInst::ICMP_UGE:
4794 // (float)int >= -4.4 --> true
4795 // (float)int >= 4.4 --> int > 4
4796 if (RHS.isNegative())
4797 return replaceInstUsesWith(I, Builder->getTrue());
4798 Pred = ICmpInst::ICMP_UGT;
4800 case ICmpInst::ICMP_SGE:
4801 // (float)int >= -4.4 --> int >= -4
4802 // (float)int >= 4.4 --> int > 4
4803 if (!RHS.isNegative())
4804 Pred = ICmpInst::ICMP_SGT;
4810 // Lower this FP comparison into an appropriate integer version of the
4811 // comparison.
4812 return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
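// Illustrative IR sketch of the overall transform (hypothetical values):
//   %f = sitofp i32 %x to float
//   %c = fcmp olt float %f, 4.5
// -->
//   %c = icmp sle i32 %x, 4    ; (float)int < 4.5  <=>  int <= 4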
Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
  bool Changed = false;

  // Order the operands of the compare so that they are listed from most
  // complex to least complex. This puts binary operators before unary
  // operators, and unary operators before constants.
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
    I.swapOperands();
    Changed = true;
  }
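
  // E.g. 'fcmp olt float 4.0, %x' is canonicalized here to
  // 'fcmp ogt float %x, 4.0'; swapOperands() also swaps the predicate, so the
  // result is unchanged.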

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V =
          SimplifyFCmpInst(I.getPredicate(), Op0, Op1, I.getFastMathFlags(),
                           SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // Simplify 'fcmp pred X, X'.
  if (Op0 == Op1) {
    switch (I.getPredicate()) {
    default: llvm_unreachable("Unknown predicate!");
    case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
    case FCmpInst::FCMP_ULT:    // True if unordered or less than
    case FCmpInst::FCMP_UGT:    // True if unordered or greater than
    case FCmpInst::FCMP_UNE:    // True if unordered or not equal
      // Canonicalize these to be 'fcmp uno %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_UNO);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;

    case FCmpInst::FCMP_ORD:    // True if ordered (no NaNs)
    case FCmpInst::FCMP_OEQ:    // True if ordered and equal
    case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
    case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
      // Canonicalize these to be 'fcmp ord %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_ORD);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;
    }
  }
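
  // E.g. 'fcmp ult %x, %x' is true exactly when %x is a NaN, so it becomes
  // 'fcmp uno %x, 0.0'; 'fcmp oeq %x, %x' likewise becomes 'fcmp ord %x, 0.0'.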

  // Test if the FCmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  // and CodeGen. And in this case, at least one of the comparison
  // operands has at least one user besides the compare (the select),
  // which would often largely negate the benefit of folding anyway.
  if (I.hasOneUse())
    if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
      if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
          (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
        return nullptr;
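
  // E.g. the pair '%c = fcmp olt float %a, %b' and
  // '%m = select i1 %c, float %a, float %b' forms a floating-point minimum
  // idiom, so the compare is left untouched here.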

  // Handle fcmp with constant RHS.
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::FPExt: {
        // fcmp (fpext x), C -> fcmp x, (fptrunc C) if fptrunc is lossless
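        // E.g. 'fcmp olt (fpext float %x to double), 1.0' becomes
        // 'fcmp olt float %x, 1.0', since 1.0 round-trips through float
        // exactly; a double constant with no exact float value blocks the
        // fold.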
        FPExtInst *LHSExt = cast<FPExtInst>(LHSI);
        ConstantFP *RHSF = dyn_cast<ConstantFP>(RHSC);
        if (!RHSF)
          break;

        const fltSemantics *Sem;
        // FIXME: This shouldn't be here.
        if (LHSExt->getSrcTy()->isHalfTy())
          Sem = &APFloat::IEEEhalf();
        else if (LHSExt->getSrcTy()->isFloatTy())
          Sem = &APFloat::IEEEsingle();
        else if (LHSExt->getSrcTy()->isDoubleTy())
          Sem = &APFloat::IEEEdouble();
        else if (LHSExt->getSrcTy()->isFP128Ty())
          Sem = &APFloat::IEEEquad();
        else if (LHSExt->getSrcTy()->isX86_FP80Ty())
          Sem = &APFloat::x87DoubleExtended();
        else if (LHSExt->getSrcTy()->isPPC_FP128Ty())
          Sem = &APFloat::PPCDoubleDouble();
        else
          break;

        bool Lossy;
        APFloat F = RHSF->getValueAPF();
        F.convert(*Sem, APFloat::rmNearestTiesToEven, &Lossy);

        // Avoid lossy conversions and denormals.
        // Zero is a special case that's OK to convert.
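        // E.g. the smallest normalized float is about 1.18e-38, so a nonzero
        // double constant below that magnitude would be denormal (or lossy)
        // as a float and is not converted.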
        APFloat Fabs = F;
        Fabs.clearSign();
        if (!Lossy &&
            ((Fabs.compare(APFloat::getSmallestNormalized(*Sem)) !=
              APFloat::cmpLessThan) || Fabs.isZero()))
          return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
                              ConstantFP::get(RHSC->getContext(), F));
        break;
      }
      case Instruction::PHI:
        // Only fold fcmp into the PHI if the phi and fcmp are in the same
        // block. If in the same block, we're encouraging jump threading. If
        // not, we are just pessimizing the code by making an i1 phi.
        if (LHSI->getParent() == I.getParent())
          if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
            return NV;
        break;
      case Instruction::SIToFP:
      case Instruction::UIToFP:
        if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
          return NV;
        break;
      case Instruction::FSub: {
        // fcmp pred (fneg x), C -> fcmp swap(pred) x, -C
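        // E.g. 'fcmp ogt (fsub -0.0, %x), 1.0' becomes 'fcmp olt %x, -1.0'.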
        Value *Op;
        if (match(LHSI, m_FNeg(m_Value(Op))))
          return new FCmpInst(I.getSwappedPredicate(), Op,
                              ConstantExpr::getFNeg(RHSC));
        break;
      }
      case Instruction::Load:
        if (GetElementPtrInst *GEP =
                dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
            if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
                !cast<LoadInst>(LHSI)->isVolatile())
              if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
                return Res;
        }
        break;
      case Instruction::Call: {
        if (!RHSC->isNullValue())
          break;

        CallInst *CI = cast<CallInst>(LHSI);
        Intrinsic::ID IID = getIntrinsicForCallSite(CI, &TLI);
        if (IID != Intrinsic::fabs)
          break;

        // Various optimizations for fabs compared with zero.
        switch (I.getPredicate()) {
        default:
          break;
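        // The cases below use the fact that fabs(x) is never negative;
        // e.g. 'fcmp oge (fabs %x), 0.0' can only be false when %x is a NaN.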
        // fabs(x) < 0 --> false
        case FCmpInst::FCMP_OLT:
          llvm_unreachable("handled by SimplifyFCmpInst");
        // fabs(x) > 0 --> x != 0
        case FCmpInst::FCMP_OGT:
          return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0), RHSC);
        // fabs(x) <= 0 --> x == 0
        case FCmpInst::FCMP_OLE:
          return new FCmpInst(FCmpInst::FCMP_OEQ, CI->getArgOperand(0), RHSC);
        // fabs(x) >= 0 --> !isnan(x)
        case FCmpInst::FCMP_OGE:
          return new FCmpInst(FCmpInst::FCMP_ORD, CI->getArgOperand(0), RHSC);
        // fabs(x) == 0 --> x == 0
        // fabs(x) != 0 --> x != 0
        case FCmpInst::FCMP_OEQ:
        case FCmpInst::FCMP_UEQ:
        case FCmpInst::FCMP_ONE:
        case FCmpInst::FCMP_UNE:
          return new FCmpInst(I.getPredicate(), CI->getArgOperand(0), RHSC);
        }
        break;
      }
      }
  }

  // fcmp pred (fneg x), (fneg y) -> fcmp swap(pred) x, y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return new FCmpInst(I.getSwappedPredicate(), X, Y);
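
  // E.g. 'fcmp ole (fsub -0.0, %a), (fsub -0.0, %b)' becomes
  // 'fcmp oge float %a, %b'.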

  // fcmp (fpext x), (fpext y) -> fcmp x, y
  if (FPExtInst *LHSExt = dyn_cast<FPExtInst>(Op0))
    if (FPExtInst *RHSExt = dyn_cast<FPExtInst>(Op1))
      if (LHSExt->getSrcTy() == RHSExt->getSrcTy())
        return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
                            RHSExt->getOperand(0));
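
  // fpext is exact, so e.g. 'fcmp ogt (fpext float %a to double),
  // (fpext float %b to double)' compares the same values as
  // 'fcmp ogt float %a, %b'.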

  return Changed ? &I : nullptr;
}