1 //===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements routines for folding instructions into simpler forms
11 // that do not require creating new instructions. This does constant folding
12 // ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
13 // returning a constant ("and i32 %x, 0" -> "0") or an already existing value
14 // ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
15 // simplified: this is usually true and simplifies the logic (if they have not
16 // been simplified then the results are still correct but may be suboptimal).
18 //===----------------------------------------------------------------------===//
20 #include "llvm/Analysis/InstructionSimplify.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/ConstantFolding.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/OptimizationDiagnosticInfo.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/Analysis/VectorUtils.h"
30 #include "llvm/IR/ConstantRange.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/Dominators.h"
33 #include "llvm/IR/GetElementPtrTypeIterator.h"
34 #include "llvm/IR/GlobalAlias.h"
35 #include "llvm/IR/Operator.h"
36 #include "llvm/IR/PatternMatch.h"
37 #include "llvm/IR/ValueHandle.h"
40 using namespace llvm::PatternMatch;
42 #define DEBUG_TYPE "instsimplify"
44 enum { RecursionLimit = 3 };
46 STATISTIC(NumExpand, "Number of expansions");
47 STATISTIC(NumReassoc, "Number of reassociations");
52 const TargetLibraryInfo *TLI;
53 const DominatorTree *DT;
55 const Instruction *CxtI;
57 Query(const DataLayout &DL, const TargetLibraryInfo *tli,
58 const DominatorTree *dt, AssumptionCache *ac = nullptr,
59 const Instruction *cxti = nullptr)
60 : DL(DL), TLI(tli), DT(dt), AC(ac), CxtI(cxti) {}
62 } // end anonymous namespace
64 static Value *SimplifyAndInst(Value *, Value *, const Query &, unsigned);
65 static Value *SimplifyBinOp(unsigned, Value *, Value *, const Query &,
67 static Value *SimplifyFPBinOp(unsigned, Value *, Value *, const FastMathFlags &,
68 const Query &, unsigned);
69 static Value *SimplifyCmpInst(unsigned, Value *, Value *, const Query &,
71 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
72 const Query &Q, unsigned MaxRecurse);
73 static Value *SimplifyOrInst(Value *, Value *, const Query &, unsigned);
74 static Value *SimplifyXorInst(Value *, Value *, const Query &, unsigned);
75 static Value *SimplifyCastInst(unsigned, Value *, Type *,
76 const Query &, unsigned);
78 /// For a boolean type or a vector of boolean type, return false or a vector
79 /// with every element false.
80 static Constant *getFalse(Type *Ty) {
81 return ConstantInt::getFalse(Ty);
84 /// For a boolean type or a vector of boolean type, return true or a vector
85 /// with every element true.
86 static Constant *getTrue(Type *Ty) {
87 return ConstantInt::getTrue(Ty);
90 /// Is V equivalent to the comparison "LHS Pred RHS"?
91 static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
93 CmpInst *Cmp = dyn_cast<CmpInst>(V);
96 CmpInst::Predicate CPred = Cmp->getPredicate();
97 Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
98 if (CPred == Pred && CLHS == LHS && CRHS == RHS)
100 return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS && CRHS == LHS;
104 /// Does the given value dominate the specified phi node?
105 static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
106 Instruction *I = dyn_cast<Instruction>(V);
108 // Arguments and constants dominate all instructions.
111 // If we are processing instructions (and/or basic blocks) that have not been
112 // fully added to a function, the parent nodes may still be null. Simply
113 // return the conservative answer in these cases.
114 if (!I->getParent() || !P->getParent() || !I->getParent()->getParent())
117 // If we have a DominatorTree then do a precise test.
119 if (!DT->isReachableFromEntry(P->getParent()))
121 if (!DT->isReachableFromEntry(I->getParent()))
123 return DT->dominates(I, P);
126 // Otherwise, if the instruction is in the entry block and is not an invoke,
127 // then it obviously dominates all phi nodes.
128 if (I->getParent() == &I->getParent()->getParent()->getEntryBlock() && !isa<InvokeInst>(I)) return true;
135 /// Simplify "A op (B op' C)" by distributing op over op', turning it into
136 /// "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is
137 /// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS.
138 /// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
139 /// Returns the simplified value, or null if no simplification was performed.
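/// A contrived illustration of the mechanism (callers only benefit when both
/// expanded halves simplify): expanding "(X + Y) * 0" over the add gives
/// "(X * 0) + (Y * 0)"; both halves fold to 0 and "0 + 0" folds to 0, so the
/// whole expression folds to 0 without creating new instructions.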
140 static Value *ExpandBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
141 Instruction::BinaryOps OpcodeToExpand, const Query &Q,
142 unsigned MaxRecurse) {
143 // Recursion is always used, so bail out at once if we already hit the limit.
147 // Check whether the expression has the form "(A op' B) op C".
148 if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
149 if (Op0->getOpcode() == OpcodeToExpand) {
150 // It does! Try turning it into "(A op C) op' (B op C)".
151 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
152 // Do "A op C" and "B op C" both simplify?
153 if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse))
154 if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
155 // They do! Return "L op' R" if it simplifies or is already available.
156 // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
157 if ((L == A && R == B) || (Instruction::isCommutative(OpcodeToExpand)
158 && L == B && R == A)) {
162 // Otherwise return "L op' R" if it simplifies.
163 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
170 // Check whether the expression has the form "A op (B op' C)".
171 if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
172 if (Op1->getOpcode() == OpcodeToExpand) {
173 // It does! Try turning it into "(A op B) op' (A op C)".
174 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
175 // Do "A op B" and "A op C" both simplify?
176 if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse))
177 if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) {
178 // They do! Return "L op' R" if it simplifies or is already available.
179 // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
180 if ((L == B && R == C) || (Instruction::isCommutative(OpcodeToExpand)
181 && L == C && R == B)) {
185 // Otherwise return "L op' R" if it simplifies.
186 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
196 /// Generic simplifications for associative binary operations.
197 /// Returns the simpler value, or null if none was found.
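/// For example, "(A & B) & B" reassociates to "A & (B & B)"; "B & B" folds to
/// "B", so the expression folds to the already-existing value "A & B" (the
/// LHS) and no new instruction is created.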
198 static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
199 Value *LHS, Value *RHS, const Query &Q,
200 unsigned MaxRecurse) {
201 assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
203 // Recursion is always used, so bail out at once if we already hit the limit.
207 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
208 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
210 // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
211 if (Op0 && Op0->getOpcode() == Opcode) {
212 Value *A = Op0->getOperand(0);
213 Value *B = Op0->getOperand(1);
216 // Does "B op C" simplify?
217 if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
218 // It does! Return "A op V" if it simplifies or is already available.
219 // If V equals B then "A op V" is just the LHS.
220 if (V == B) return LHS;
221 // Otherwise return "A op V" if it simplifies.
222 if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
229 // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
230 if (Op1 && Op1->getOpcode() == Opcode) {
232 Value *B = Op1->getOperand(0);
233 Value *C = Op1->getOperand(1);
235 // Does "A op B" simplify?
236 if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
237 // It does! Return "V op C" if it simplifies or is already available.
238 // If V equals B then "V op C" is just the RHS.
239 if (V == B) return RHS;
240 // Otherwise return "V op C" if it simplifies.
241 if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
248 // The remaining transforms require commutativity as well as associativity.
249 if (!Instruction::isCommutative(Opcode))
252 // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
253 if (Op0 && Op0->getOpcode() == Opcode) {
254 Value *A = Op0->getOperand(0);
255 Value *B = Op0->getOperand(1);
258 // Does "C op A" simplify?
259 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
260 // It does! Return "V op B" if it simplifies or is already available.
261 // If V equals A then "V op B" is just the LHS.
262 if (V == A) return LHS;
263 // Otherwise return "V op B" if it simplifies.
264 if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
271 // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
272 if (Op1 && Op1->getOpcode() == Opcode) {
274 Value *B = Op1->getOperand(0);
275 Value *C = Op1->getOperand(1);
277 // Does "C op A" simplify?
278 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
279 // It does! Return "B op V" if it simplifies or is already available.
280 // If V equals C then "B op V" is just the RHS.
281 if (V == C) return RHS;
282 // Otherwise return "B op V" if it simplifies.
283 if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
293 /// In the case of a binary operation with a select instruction as an operand,
294 /// try to simplify the binop by seeing whether evaluating it on both branches
295 /// of the select results in the same value. Returns the common value if so,
296 /// otherwise returns null.
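/// For example, "and (select i1 %c, i32 -1, i32 %x), %x" evaluates to
/// "-1 & %x == %x" on the true arm and "%x & %x == %x" on the false arm, so
/// the whole binop folds to "%x" regardless of %c.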
297 static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
298 Value *RHS, const Query &Q,
299 unsigned MaxRecurse) {
300 // Recursion is always used, so bail out at once if we already hit the limit.
305 if (isa<SelectInst>(LHS)) {
306 SI = cast<SelectInst>(LHS);
308 assert(isa<SelectInst>(RHS) && "No select instruction operand!");
309 SI = cast<SelectInst>(RHS);
312 // Evaluate the BinOp on the true and false branches of the select.
316 TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
317 FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
319 TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
320 FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
323 // If they simplified to the same value, then return the common value.
324 // If they both failed to simplify then return null.
328 // If one branch simplified to undef, return the other one.
329 if (TV && isa<UndefValue>(TV))
331 if (FV && isa<UndefValue>(FV))
334 // If applying the operation did not change the true and false select values,
335 // then the result of the binop is the select itself.
336 if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
339 // If one branch simplified and the other did not, and the simplified
340 // value is equal to the unsimplified one, return the simplified value.
341 // For example, select (cond, X, X & Z) & Z -> X & Z.
342 if ((FV && !TV) || (TV && !FV)) {
343 // Check that the simplified value has the form "X op Y" where "op" is the
344 // same as the original operation.
345 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
346 if (Simplified && Simplified->getOpcode() == Opcode) {
347 // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
348 // We already know that "op" is the same as for the simplified value. See
349 // if the operands match too. If so, return the simplified value.
350 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
351 Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
352 Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
353 if (Simplified->getOperand(0) == UnsimplifiedLHS &&
354 Simplified->getOperand(1) == UnsimplifiedRHS)
356 if (Simplified->isCommutative() &&
357 Simplified->getOperand(1) == UnsimplifiedLHS &&
358 Simplified->getOperand(0) == UnsimplifiedRHS)
366 /// In the case of a comparison with a select instruction, try to simplify the
367 /// comparison by seeing whether both branches of the select result in the same
368 /// value. Returns the common value if so, otherwise returns null.
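/// For example, "icmp ult (select i1 %c, i32 3, i32 5), 10" folds to true:
/// the compare simplifies to true on both arms of the select, so the result
/// does not depend on %c.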
369 static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
370 Value *RHS, const Query &Q,
371 unsigned MaxRecurse) {
372 // Recursion is always used, so bail out at once if we already hit the limit.
376 // Make sure the select is on the LHS.
377 if (!isa<SelectInst>(LHS)) {
379 Pred = CmpInst::getSwappedPredicate(Pred);
381 assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
382 SelectInst *SI = cast<SelectInst>(LHS);
383 Value *Cond = SI->getCondition();
384 Value *TV = SI->getTrueValue();
385 Value *FV = SI->getFalseValue();
387 // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
388 // Does "cmp TV, RHS" simplify?
389 Value *TCmp = SimplifyCmpInst(Pred, TV, RHS, Q, MaxRecurse);
391 // It not only simplified, it simplified to the select condition; replace it with 'true'.
393 TCmp = getTrue(Cond->getType());
395 // It didn't simplify. However if "cmp TV, RHS" is equal to the select
396 // condition then we can replace it with 'true'. Otherwise give up.
397 if (!isSameCompare(Cond, Pred, TV, RHS))
399 TCmp = getTrue(Cond->getType());
402 // Does "cmp FV, RHS" simplify?
403 Value *FCmp = SimplifyCmpInst(Pred, FV, RHS, Q, MaxRecurse);
405 // It not only simplified, it simplified to the select condition; replace it with 'false'.
407 FCmp = getFalse(Cond->getType());
409 // It didn't simplify. However if "cmp FV, RHS" is equal to the select
410 // condition then we can replace it with 'false'. Otherwise give up.
411 if (!isSameCompare(Cond, Pred, FV, RHS))
413 FCmp = getFalse(Cond->getType());
416 // If both sides simplified to the same value, then use it as the result of
417 // the original comparison.
421 // The remaining cases only make sense if the select condition has the same
422 // type as the result of the comparison, so bail out if this is not so.
423 if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
425 // If the false value simplified to false, then the result of the compare
426 // is equal to "Cond && TCmp". This also catches the case when the false
427 // value simplified to false and the true value to true, returning "Cond".
428 if (match(FCmp, m_Zero()))
429 if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
431 // If the true value simplified to true, then the result of the compare
432 // is equal to "Cond || FCmp".
433 if (match(TCmp, m_One()))
434 if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
436 // Finally, if the false value simplified to true and the true value to
437 // false, then the result of the compare is equal to "!Cond".
438 if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
440 SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()),
447 /// In the case of a binary operation with an operand that is a PHI instruction,
448 /// try to simplify the binop by seeing whether evaluating it on the incoming
449 /// phi values yields the same result for every value. If so returns the common
450 /// value, otherwise returns null.
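/// For example, if %p = phi i32 [ 0, %bb1 ], [ 0, %bb2 ], then "or i32 %p, %x"
/// folds to "%x": the binop simplifies to "%x" for every incoming value
/// (provided %x dominates the phi; see ValueDominatesPHI above).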
451 static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
452 Value *RHS, const Query &Q,
453 unsigned MaxRecurse) {
454 // Recursion is always used, so bail out at once if we already hit the limit.
459 if (isa<PHINode>(LHS)) {
460 PI = cast<PHINode>(LHS);
461 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
462 if (!ValueDominatesPHI(RHS, PI, Q.DT))
465 assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
466 PI = cast<PHINode>(RHS);
467 // Bail out if LHS and the phi may be mutually interdependent due to a loop.
468 if (!ValueDominatesPHI(LHS, PI, Q.DT))
472 // Evaluate the BinOp on the incoming phi values.
473 Value *CommonValue = nullptr;
474 for (Value *Incoming : PI->incoming_values()) {
475 // If the incoming value is the phi node itself, it can safely be skipped.
476 if (Incoming == PI) continue;
477 Value *V = PI == LHS ?
478 SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
479 SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
480 // If the operation failed to simplify, or simplified to a different value
481 // than it did for a previous incoming value, then give up.
482 if (!V || (CommonValue && V != CommonValue))
490 /// In the case of a comparison with a PHI instruction, try to simplify the
491 /// comparison by seeing whether comparing with all of the incoming phi values
492 /// yields the same result every time. If so returns the common result,
493 /// otherwise returns null.
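/// For example, if %p = phi i32 [ 2, %bb1 ], [ 4, %bb2 ], then
/// "icmp ult i32 %p, 8" folds to true, since the compare is true for every
/// incoming value.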
494 static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
495 const Query &Q, unsigned MaxRecurse) {
496 // Recursion is always used, so bail out at once if we already hit the limit.
500 // Make sure the phi is on the LHS.
501 if (!isa<PHINode>(LHS)) {
503 Pred = CmpInst::getSwappedPredicate(Pred);
505 assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
506 PHINode *PI = cast<PHINode>(LHS);
508 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
509 if (!ValueDominatesPHI(RHS, PI, Q.DT))
512 // Evaluate the BinOp on the incoming phi values.
513 Value *CommonValue = nullptr;
514 for (Value *Incoming : PI->incoming_values()) {
515 // If the incoming value is the phi node itself, it can safely be skipped.
516 if (Incoming == PI) continue;
517 Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q, MaxRecurse);
518 // If the operation failed to simplify, or simplified to a different value
519 // than it did for a previous incoming value, then give up.
520 if (!V || (CommonValue && V != CommonValue))
528 static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
529 Value *&Op0, Value *&Op1,
531 if (auto *CLHS = dyn_cast<Constant>(Op0)) {
532 if (auto *CRHS = dyn_cast<Constant>(Op1))
533 return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
535 // Canonicalize the constant to the RHS if this is a commutative operation.
536 if (Instruction::isCommutative(Opcode))
542 /// Given operands for an Add, see if we can fold the result.
543 /// If not, this returns null.
544 static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
545 const Query &Q, unsigned MaxRecurse) {
546 if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
549 // X + undef -> undef
550 if (match(Op1, m_Undef()))
554 if (match(Op1, m_Zero()))
561 if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
562 match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
565 // X + ~X -> -1 since ~X = -X-1
566 Type *Ty = Op0->getType();
567 if (match(Op0, m_Not(m_Specific(Op1))) ||
568 match(Op1, m_Not(m_Specific(Op0))))
569 return Constant::getAllOnesValue(Ty);
571 // add nsw/nuw (xor Y, signmask), signmask --> Y
572 // The no-wrapping add guarantees that the top bit will be set by the add.
573 // Therefore, the xor must be clearing the already set sign bit of Y.
574 if ((isNSW || isNUW) && match(Op1, m_SignMask()) &&
575 match(Op0, m_Xor(m_Value(Y), m_SignMask())))
579 if (MaxRecurse && Op0->getType()->getScalarType()->isIntegerTy(1))
580 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
583 // Try some generic simplifications for associative operations.
584 if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
588 // Threading Add over selects and phi nodes is pointless, so don't bother.
589 // Threading over the select in "A + select(cond, B, C)" means evaluating
590 // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
591 // only if B and C are equal. If B and C are equal then (since we assume
592 // that operands have already been simplified) "select(cond, B, C)" should
593 // have been simplified to the common value of B and C already. Analysing
594 // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
595 // for threading over phi nodes.
600 Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
601 const DataLayout &DL, const TargetLibraryInfo *TLI,
602 const DominatorTree *DT, AssumptionCache *AC,
603 const Instruction *CxtI) {
604 return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
608 /// \brief Compute the base pointer and cumulative constant offsets for V.
610 /// This strips all constant offsets off of V, leaving it the base pointer, and
611 /// accumulates the total constant offset applied in the returned constant. It
612 /// returns 0 if V is not a pointer, and returns the constant '0' if there are
613 /// no constant offsets applied.
615 /// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
616 /// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc. folding.
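/// For example, given "%q = getelementptr inbounds i32, i32* %p, i64 3" and
/// "%r = bitcast i32* %q to i8*", calling this on %r leaves V pointing at %p
/// and returns the constant offset 12 (3 elements * 4 bytes).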
618 static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
619 bool AllowNonInbounds = false) {
620 assert(V->getType()->getScalarType()->isPointerTy());
622 Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
623 APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
625 // Even though we don't look through PHI nodes, we could be called on an
626 // instruction in an unreachable block, which may be on a cycle.
627 SmallPtrSet<Value *, 4> Visited;
630 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
631 if ((!AllowNonInbounds && !GEP->isInBounds()) ||
632 !GEP->accumulateConstantOffset(DL, Offset))
634 V = GEP->getPointerOperand();
635 } else if (Operator::getOpcode(V) == Instruction::BitCast) {
636 V = cast<Operator>(V)->getOperand(0);
637 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
638 if (GA->isInterposable())
640 V = GA->getAliasee();
642 if (auto CS = CallSite(V))
643 if (Value *RV = CS.getReturnedArgOperand()) {
649 assert(V->getType()->getScalarType()->isPointerTy() &&
650 "Unexpected operand type!");
651 } while (Visited.insert(V).second);
653 Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
654 if (V->getType()->isVectorTy())
655 return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
660 /// \brief Compute the constant difference between two pointer values.
661 /// If the difference is not a constant, returns zero.
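/// For example, with LHS = "getelementptr inbounds i8, i8* %p, i64 7" and
/// RHS = "getelementptr inbounds i8, i8* %p, i64 3", both strip down to the
/// common base %p and the returned difference is the constant 4.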
662 static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
664 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
665 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
667 // If LHS and RHS are not related via constant offsets to the same base
668 // value, there is nothing we can do here.
672 // Otherwise, the difference of LHS - RHS can be computed as:
674 // = (LHSOffset + Base) - (RHSOffset + Base)
675 // = LHSOffset - RHSOffset
676 return ConstantExpr::getSub(LHSOffset, RHSOffset);
679 /// Given operands for a Sub, see if we can fold the result.
680 /// If not, this returns null.
681 static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
682 const Query &Q, unsigned MaxRecurse) {
683 if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
686 // X - undef -> undef
687 // undef - X -> undef
688 if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
689 return UndefValue::get(Op0->getType());
692 if (match(Op1, m_Zero()))
697 return Constant::getNullValue(Op0->getType());
699 // Is this a negation?
700 if (match(Op0, m_Zero())) {
701 // 0 - X -> 0 if the sub is NUW.
705 unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
706 APInt KnownZero(BitWidth, 0);
707 APInt KnownOne(BitWidth, 0);
708 computeKnownBits(Op1, KnownZero, KnownOne, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
709 if (KnownZero.isMaxSignedValue()) {
710 // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
711 // Op1 must be 0 because negating the minimum signed value is undefined.
715 // 0 - X -> X if X is 0 or the minimum signed value.
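// Worked i8 example: if the low 7 bits of X are known zero, X is either 0 or
// -128. Then 0 - 0 == 0 and 0 - (-128) wraps to -128, so "0 - X" equals X in
// both cases.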
720 // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
721 // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
722 Value *X = nullptr, *Y = nullptr, *Z = Op1;
723 if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
724 // See if "V === Y - Z" simplifies.
725 if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
726 // It does! Now see if "X + V" simplifies.
727 if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
728 // It does, we successfully reassociated!
732 // See if "V === X - Z" simplifies.
733 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
734 // It does! Now see if "Y + V" simplifies.
735 if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
736 // It does, we successfully reassociated!
742 // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
743 // For example, X - (X + 1) -> -1
745 if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
746 // See if "V === X - Y" simplifies.
747 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
748 // It does! Now see if "V - Z" simplifies.
749 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
750 // It does, we successfully reassociated!
754 // See if "V === X - Z" simplifies.
755 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
756 // It does! Now see if "V - Y" simplifies.
757 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
758 // It does, we successfully reassociated!
764 // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
765 // For example, X - (X - Y) -> Y.
767 if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
768 // See if "V === Z - X" simplifies.
769 if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
770 // It does! Now see if "V + Y" simplifies.
771 if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
772 // It does, we successfully reassociated!
777 // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
778 if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
779 match(Op1, m_Trunc(m_Value(Y))))
780 if (X->getType() == Y->getType())
781 // See if "V === X - Y" simplifies.
782 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
783 // It does! Now see if "trunc V" simplifies.
784 if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
786 // It does, return the simplified "trunc V".
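// For example, with two separate truncations of the same value,
//   %t1 = trunc i64 %a to i32
//   %t2 = trunc i64 %a to i32
// "sub i32 %t1, %t2" folds to 0: "%a - %a" simplifies to 0, and truncating 0
// is still 0.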
789 // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
790 if (match(Op0, m_PtrToInt(m_Value(X))) &&
791 match(Op1, m_PtrToInt(m_Value(Y))))
792 if (Constant *Result = computePointerDifference(Q.DL, X, Y))
793 return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
796 if (MaxRecurse && Op0->getType()->getScalarType()->isIntegerTy(1))
797 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
800 // Threading Sub over selects and phi nodes is pointless, so don't bother.
801 // Threading over the select in "A - select(cond, B, C)" means evaluating
802 // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
803 // only if B and C are equal. If B and C are equal then (since we assume
804 // that operands have already been simplified) "select(cond, B, C)" should
805 // have been simplified to the common value of B and C already. Analysing
806 // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
807 // for threading over phi nodes.
812 Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
813 const DataLayout &DL, const TargetLibraryInfo *TLI,
814 const DominatorTree *DT, AssumptionCache *AC,
815 const Instruction *CxtI) {
816 return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
820 /// Given operands for an FAdd, see if we can fold the result. If not, this returns null.
822 static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
823 const Query &Q, unsigned MaxRecurse) {
824 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
828 if (match(Op1, m_NegZero()))
831 // fadd X, 0 ==> X, when we know X is not -0
832 if (match(Op1, m_Zero()) &&
833 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
836 // fadd [nnan ninf] X, (fsub [nnan ninf] 0, X) ==> 0
837 // where nnan and ninf have to occur at least once somewhere in this expression.
839 Value *SubOp = nullptr;
840 if (match(Op1, m_FSub(m_AnyZero(), m_Specific(Op0))))
842 else if (match(Op0, m_FSub(m_AnyZero(), m_Specific(Op1))))
845 Instruction *FSub = cast<Instruction>(SubOp);
846 if ((FMF.noNaNs() || FSub->hasNoNaNs()) &&
847 (FMF.noInfs() || FSub->hasNoInfs()))
848 return Constant::getNullValue(Op0->getType());
854 /// Given operands for an FSub, see if we can fold the result. If not, this returns null.
856 static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
857 const Query &Q, unsigned MaxRecurse) {
858 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
862 if (match(Op1, m_Zero()))
865 // fsub X, -0 ==> X, when we know X is not -0
866 if (match(Op1, m_NegZero()) &&
867 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
870 // fsub -0.0, (fsub -0.0, X) ==> X
872 if (match(Op0, m_NegZero()) && match(Op1, m_FSub(m_NegZero(), m_Value(X))))
875 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
876 if (FMF.noSignedZeros() && match(Op0, m_AnyZero()) &&
877 match(Op1, m_FSub(m_AnyZero(), m_Value(X))))
880 // fsub nnan x, x ==> 0.0
881 if (FMF.noNaNs() && Op0 == Op1)
882 return Constant::getNullValue(Op0->getType());
887 /// Given the operands for an FMul, see if we can fold the result
888 static Value *SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
889 const Query &Q, unsigned MaxRecurse) {
890 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
894 if (match(Op1, m_FPOne()))
897 // fmul nnan nsz X, 0 ==> 0
898 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZero()))
904 /// Given operands for a Mul, see if we can fold the result.
905 /// If not, this returns null.
906 static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
907 unsigned MaxRecurse) {
908 if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
912 if (match(Op1, m_Undef()))
913 return Constant::getNullValue(Op0->getType());
916 if (match(Op1, m_Zero()))
920 if (match(Op1, m_One()))
923 // (X / Y) * Y -> X if the division is exact.
925 if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
926 match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0))))) // Y * (X / Y)
930 if (MaxRecurse && Op0->getType()->getScalarType()->isIntegerTy(1))
931 if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
934 // Try some generic simplifications for associative operations.
935 if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
939 // Mul distributes over Add. Try some generic simplifications based on this.
940 if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
944 // If the operation is with the result of a select instruction, check whether
945 // operating on either branch of the select always yields the same value.
946 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
947 if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
951 // If the operation is with the result of a phi instruction, check whether
952 // operating on all incoming values of the phi always yields the same value.
953 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
954 if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
961 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
962 const DataLayout &DL,
963 const TargetLibraryInfo *TLI,
964 const DominatorTree *DT, AssumptionCache *AC,
965 const Instruction *CxtI) {
966 return ::SimplifyFAddInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
970 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
971 const DataLayout &DL,
972 const TargetLibraryInfo *TLI,
973 const DominatorTree *DT, AssumptionCache *AC,
974 const Instruction *CxtI) {
975 return ::SimplifyFSubInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
979 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
980 const DataLayout &DL,
981 const TargetLibraryInfo *TLI,
982 const DominatorTree *DT, AssumptionCache *AC,
983 const Instruction *CxtI) {
984 return ::SimplifyFMulInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
988 Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout &DL,
989 const TargetLibraryInfo *TLI,
990 const DominatorTree *DT, AssumptionCache *AC,
991 const Instruction *CxtI) {
992 return ::SimplifyMulInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
996 /// Check for common or similar folds of integer division or integer remainder.
997 static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) {
998 Type *Ty = Op0->getType();
1000 // X / undef -> undef
1001 // X % undef -> undef
1002 if (match(Op1, m_Undef()))
1007 // We don't need to preserve faults!
1008 if (match(Op1, m_Zero()))
1009 return UndefValue::get(Ty);
1011 // If any element of a constant divisor vector is zero, the whole op is undef.
1012 auto *Op1C = dyn_cast<Constant>(Op1);
1013 if (Op1C && Ty->isVectorTy()) {
1014 unsigned NumElts = Ty->getVectorNumElements();
1015 for (unsigned i = 0; i != NumElts; ++i) {
1016 Constant *Elt = Op1C->getAggregateElement(i);
1017 if (Elt && Elt->isNullValue())
1018 return UndefValue::get(Ty);
1024 if (match(Op0, m_Undef()))
1025 return Constant::getNullValue(Ty);
1029 if (match(Op0, m_Zero()))
1035 return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);
1039 // If this is a boolean op (single-bit element type), we can't have
1040 // division-by-zero or remainder-by-zero, so assume the divisor is 1.
1041 if (match(Op1, m_One()) || Ty->getScalarType()->isIntegerTy(1))
1042 return IsDiv ? Op0 : Constant::getNullValue(Ty);
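// For example, "sdiv i1 %x, %y" folds to %x and "srem i1 %x, %y" folds to 0:
// an i1 divisor is either 0 (undefined behavior) or 1, so it is assumed to
// be 1.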
1047 /// Given operands for an SDiv or UDiv, see if we can fold the result.
1048 /// If not, this returns null.
1049 static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1050 const Query &Q, unsigned MaxRecurse) {
1051 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1054 if (Value *V = simplifyDivRem(Op0, Op1, true))
1057 bool isSigned = Opcode == Instruction::SDiv;
1059 // (X * Y) / Y -> X if the multiplication does not overflow.
1060 Value *X = nullptr, *Y = nullptr;
1061 if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
1062 if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
1063 OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0);
1064 // If the Mul knows it does not overflow, then we are good to go.
1065 if ((isSigned && Mul->hasNoSignedWrap()) ||
1066 (!isSigned && Mul->hasNoUnsignedWrap()))
1068 // If X has the form X = A / Y then X * Y cannot overflow.
1069 if (BinaryOperator *Div = dyn_cast<BinaryOperator>(X))
1070 if (Div->getOpcode() == Opcode && Div->getOperand(1) == Y)
1074 // (X rem Y) / Y -> 0
1075 if ((isSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1076 (!isSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1077 return Constant::getNullValue(Op0->getType());
1079 // (X /u C1) /u C2 -> 0 if C1 * C2 overflow
1080 ConstantInt *C1, *C2;
1081 if (!isSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
1082 match(Op1, m_ConstantInt(C2))) {
1084 (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
1086 return Constant::getNullValue(Op0->getType());
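// For example, on i8 "(X /u 32) /u 16" folds to 0: 32 * 16 overflows i8, and
// X /u 32 is at most 255 / 32 == 7, which is always less than 16.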
1089 // If the operation is with the result of a select instruction, check whether
1090 // operating on either branch of the select always yields the same value.
1091 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1092 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1095 // If the operation is with the result of a phi instruction, check whether
1096 // operating on all incoming values of the phi always yields the same value.
1097 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1098 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1104 /// Given operands for an SDiv, see if we can fold the result.
1105 /// If not, this returns null.
1106 static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q,
1107 unsigned MaxRecurse) {
1108 if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse))
1114 Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
1115 const TargetLibraryInfo *TLI,
1116 const DominatorTree *DT, AssumptionCache *AC,
1117 const Instruction *CxtI) {
1118 return ::SimplifySDivInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
1122 /// Given operands for a UDiv, see if we can fold the result.
1123 /// If not, this returns null.
1124 static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q,
1125 unsigned MaxRecurse) {
1126 if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse))
1129 // udiv %V, C -> 0 if %V < C
1131 if (Constant *C = dyn_cast_or_null<Constant>(SimplifyICmpInst(
1132 ICmpInst::ICMP_ULT, Op0, Op1, Q, MaxRecurse - 1))) {
1133 if (C->isAllOnesValue()) {
1134 return Constant::getNullValue(Op0->getType());
1142 Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
1143 const TargetLibraryInfo *TLI,
1144 const DominatorTree *DT, AssumptionCache *AC,
1145 const Instruction *CxtI) {
1146 return ::SimplifyUDivInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
1150 static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
1151 const Query &Q, unsigned) {
1152 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
1155 // undef / X -> undef (the undef could be a snan).
1156 if (match(Op0, m_Undef()))
1159 // X / undef -> undef
1160 if (match(Op1, m_Undef()))
1164 if (match(Op1, m_FPOne()))
1168 // 0 / X -> 0, but this requires that NaNs are off (X could be zero) and signed
1169 // zeros are ignored (X could be positive or negative, so the output sign is unknown).
1170 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
1174 // X / X -> 1.0 is legal when NaNs are ignored.
1176 return ConstantFP::get(Op0->getType(), 1.0);
1178 // -X / X -> -1.0 and
1179 // X / -X -> -1.0 are legal when NaNs are ignored.
1180 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
1181 if ((BinaryOperator::isFNeg(Op0, /*IgnoreZeroSign=*/true) &&
1182 BinaryOperator::getFNegArgument(Op0) == Op1) ||
1183 (BinaryOperator::isFNeg(Op1, /*IgnoreZeroSign=*/true) &&
1184 BinaryOperator::getFNegArgument(Op1) == Op0))
1185 return ConstantFP::get(Op0->getType(), -1.0);
1191 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
1192 const DataLayout &DL,
1193 const TargetLibraryInfo *TLI,
1194 const DominatorTree *DT, AssumptionCache *AC,
1195 const Instruction *CxtI) {
1196 return ::SimplifyFDivInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
1200 /// Given operands for an SRem or URem, see if we can fold the result.
1201 /// If not, this returns null.
1202 static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1203 const Query &Q, unsigned MaxRecurse) {
1204 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1207 if (Value *V = simplifyDivRem(Op0, Op1, false))
1210 // (X % Y) % Y -> X % Y
1211 if ((Opcode == Instruction::SRem &&
1212 match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1213 (Opcode == Instruction::URem &&
1214 match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1217 // If the operation is with the result of a select instruction, check whether
1218 // operating on either branch of the select always yields the same value.
1219 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1220 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1223 // If the operation is with the result of a phi instruction, check whether
1224 // operating on all incoming values of the phi always yields the same value.
1225 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1226 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1232 /// Given operands for an SRem, see if we can fold the result.
1233 /// If not, this returns null.
1234 static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q,
1235 unsigned MaxRecurse) {
1236 if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse))
1242 Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout &DL,
1243 const TargetLibraryInfo *TLI,
1244 const DominatorTree *DT, AssumptionCache *AC,
1245 const Instruction *CxtI) {
1246 return ::SimplifySRemInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
1250 /// Given operands for a URem, see if we can fold the result.
1251 /// If not, this returns null.
1252 static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q,
1253 unsigned MaxRecurse) {
1254 if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse))
1257 // urem %V, C -> %V if %V < C
1259 if (Constant *C = dyn_cast_or_null<Constant>(SimplifyICmpInst(
1260 ICmpInst::ICMP_ULT, Op0, Op1, Q, MaxRecurse - 1))) {
1261 if (C->isAllOnesValue()) {
1270 Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout &DL,
1271 const TargetLibraryInfo *TLI,
1272 const DominatorTree *DT, AssumptionCache *AC,
1273 const Instruction *CxtI) {
1274 return ::SimplifyURemInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
1278 static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
1279 const Query &Q, unsigned) {
1280 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
1283 // undef % X -> undef (the undef could be a snan).
1284 if (match(Op0, m_Undef()))
1287 // X % undef -> undef
1288 if (match(Op1, m_Undef()))
1292 // 0 % X -> 0, but this requires that NaNs are off (X could be zero) and signed
1293 // zeros are ignored (X could be positive or negative, so the output sign is unknown).
1294 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
1300 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
1301 const DataLayout &DL,
1302 const TargetLibraryInfo *TLI,
1303 const DominatorTree *DT, AssumptionCache *AC,
1304 const Instruction *CxtI) {
1305 return ::SimplifyFRemInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
1309 /// Returns true if a shift by \c Amount always yields undef.
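/// For example, "shl i32 %x, 33" always yields undef because the shift amount
/// is not smaller than the 32-bit width, and a shift by undef may shift by
/// the bit width, so it is undef as well.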
1310 static bool isUndefShift(Value *Amount) {
1311 Constant *C = dyn_cast<Constant>(Amount);
1315 // X shift by undef -> undef because it may shift by the bitwidth.
1316 if (isa<UndefValue>(C))
1319 // Shifting by the bitwidth or more is undefined.
1320 if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
1321 if (CI->getValue().getLimitedValue() >=
1322 CI->getType()->getScalarSizeInBits())
1325 // If all lanes of a vector shift are undefined the whole shift is.
1326 if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
1327 for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
1328 if (!isUndefShift(C->getAggregateElement(I)))
1336 /// Given operands for an Shl, LShr or AShr, see if we can fold the result.
1337 /// If not, this returns null.
1338 static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
1339 Value *Op1, const Query &Q, unsigned MaxRecurse) {
1340 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1343 // 0 shift by X -> 0
1344 if (match(Op0, m_Zero()))
1347 // X shift by 0 -> X
1348 if (match(Op1, m_Zero()))
1351 // Fold undefined shifts.
1352 if (isUndefShift(Op1))
1353 return UndefValue::get(Op0->getType());
1355 // If the operation is with the result of a select instruction, check whether
1356 // operating on either branch of the select always yields the same value.
1357 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1358 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1361 // If the operation is with the result of a phi instruction, check whether
1362 // operating on all incoming values of the phi always yields the same value.
1363 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1364 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1367 // If any bits in the shift amount make that value greater than or equal to
1368 // the number of bits in the type, the shift is undefined.
1369 unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
1370 APInt KnownZero(BitWidth, 0);
1371 APInt KnownOne(BitWidth, 0);
1372 computeKnownBits(Op1, KnownZero, KnownOne, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1373 if (KnownOne.getLimitedValue() >= BitWidth)
1374 return UndefValue::get(Op0->getType());
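// For example, "lshr i32 %x, (or i32 %y, 32)" is undef: the 'or' forces bit 5
// of the shift amount, so the amount is always at least 32, the bit width.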
1376 // If all valid bits in the shift amount are known zero, the first operand is unchanged.
1378 unsigned NumValidShiftBits = Log2_32_Ceil(BitWidth);
1379 APInt ShiftAmountMask = APInt::getLowBitsSet(BitWidth, NumValidShiftBits);
1380 if ((KnownZero & ShiftAmountMask) == ShiftAmountMask)
1386 /// \brief Given operands for an LShr or AShr, see if we can
1387 /// fold the result. If not, this returns null.
1388 static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
1389 Value *Op1, bool isExact, const Query &Q,
1390 unsigned MaxRecurse) {
1391 if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse))
1396 return Constant::getNullValue(Op0->getType());
1399 // undef >> X -> 0; undef >> X -> undef if the shift is exact
1400 if (match(Op0, m_Undef()))
1401 return isExact ? Op0 : Constant::getNullValue(Op0->getType());
1403 // The low bit cannot be shifted out of an exact shift if it is set.
1405 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
1406 APInt Op0KnownZero(BitWidth, 0);
1407 APInt Op0KnownOne(BitWidth, 0);
1408 computeKnownBits(Op0, Op0KnownZero, Op0KnownOne, Q.DL, /*Depth=*/0, Q.AC,
1417 /// Given operands for an Shl, see if we can fold the result.
1418 /// If not, this returns null.
1419 static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1420 const Query &Q, unsigned MaxRecurse) {
1421 if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
1425 // undef << X -> 0; undef << X -> undef if the shift is NSW/NUW
1426 if (match(Op0, m_Undef()))
1427 return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());
1429 // (X >> A) << A -> X
1431 if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1436 Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1437 const DataLayout &DL, const TargetLibraryInfo *TLI,
1438 const DominatorTree *DT, AssumptionCache *AC,
1439 const Instruction *CxtI) {
1440 return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
1444 /// Given operands for an LShr, see if we can fold the result.
1445 /// If not, this returns null.
1446 static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1447 const Query &Q, unsigned MaxRecurse) {
1448 if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
1452 // (X << A) >> A -> X
1454 if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1460 Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1461 const DataLayout &DL,
1462 const TargetLibraryInfo *TLI,
1463 const DominatorTree *DT, AssumptionCache *AC,
1464 const Instruction *CxtI) {
1465 return ::SimplifyLShrInst(Op0, Op1, isExact, Query(DL, TLI, DT, AC, CxtI),
1469 /// Given operands for an AShr, see if we can fold the result.
1470 /// If not, this returns null.
1471 static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1472 const Query &Q, unsigned MaxRecurse) {
1473 if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
1477 // all ones >>a X -> all ones
1478 if (match(Op0, m_AllOnes()))
1481 // (X << A) >> A -> X
1483 if (match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1486 // Arithmetic shifting an all-sign-bit value is a no-op.
1487 unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1488 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1494 Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1495 const DataLayout &DL,
1496 const TargetLibraryInfo *TLI,
1497 const DominatorTree *DT, AssumptionCache *AC,
1498 const Instruction *CxtI) {
1499 return ::SimplifyAShrInst(Op0, Op1, isExact, Query(DL, TLI, DT, AC, CxtI),
1503 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1504 ICmpInst *UnsignedICmp, bool IsAnd) {
1507 ICmpInst::Predicate EqPred;
1508 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1509 !ICmpInst::isEquality(EqPred))
1512 ICmpInst::Predicate UnsignedPred;
1513 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1514 ICmpInst::isUnsigned(UnsignedPred))
1516 else if (match(UnsignedICmp,
1517 m_ICmp(UnsignedPred, m_Value(Y), m_Specific(X))) &&
1518 ICmpInst::isUnsigned(UnsignedPred))
1519 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1523 // X < Y && Y != 0 --> X < Y
1524 // X < Y || Y != 0 --> Y != 0
1525 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1526 return IsAnd ? UnsignedICmp : ZeroICmp;
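// For example, "(icmp ult %x, %y) & (icmp ne %y, 0)" folds to the first
// compare: %x <u %y can only hold when %y is nonzero, so the second compare
// adds no information to the 'and'.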
1528 // X >= Y || Y != 0 --> true
1529 // X >= Y || Y == 0 --> X >= Y
1530 if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd) {
1531 if (EqPred == ICmpInst::ICMP_NE)
1532 return getTrue(UnsignedICmp->getType());
1533 return UnsignedICmp;
1536 // X < Y && Y == 0 --> false
1537 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1539 return getFalse(UnsignedICmp->getType());
1544 /// Commuted variants are assumed to be handled by calling this function again
1545 /// with the parameters swapped.
1546 static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1547 ICmpInst::Predicate Pred0, Pred1;
1549 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1550 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1553 // We have (icmp Pred0, A, B) & (icmp Pred1, A, B).
1554 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1555 // can eliminate Op1 from this 'and'.
1556 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1559 // Check for any combination of predicates that are guaranteed to be disjoint.
1560 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1561 (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) ||
1562 (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) ||
1563 (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT))
1564 return getFalse(Op0->getType());
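// For example, "(icmp slt %a, %b) & (icmp sgt %a, %b)" folds to false: the two
// predicates can never hold for the same pair of operands.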
1569 /// Commuted variants are assumed to be handled by calling this function again
1570 /// with the parameters swapped.
1571 static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
1572 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
1575 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
1578 // Look for this pattern: (icmp V, C0) & (icmp V, C1).
1579 Type *ITy = Op0->getType();
1580 ICmpInst::Predicate Pred0, Pred1;
1581 const APInt *C0, *C1;
1583 if (match(Op0, m_ICmp(Pred0, m_Value(V), m_APInt(C0))) &&
1584 match(Op1, m_ICmp(Pred1, m_Specific(V), m_APInt(C1)))) {
1585 // Make a constant range that's the intersection of the two icmp ranges.
1586 // If the intersection is empty, we know that the result is false.
1587 auto Range0 = ConstantRange::makeAllowedICmpRegion(Pred0, *C0);
1588 auto Range1 = ConstantRange::makeAllowedICmpRegion(Pred1, *C1);
1589 if (Range0.intersectWith(Range1).isEmptySet())
1590 return getFalse(ITy);
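// For example, "(icmp ult %v, 4) & (icmp ugt %v, 10)" folds to false: the
// allowed ranges [0, 4) and [11, UINT_MAX] do not intersect.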
1593 // (icmp (add V, C0), C1) & (icmp V, C0)
1594 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1597 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1600 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1601 if (AddInst->getOperand(1) != Op1->getOperand(1))
1604 bool isNSW = AddInst->hasNoSignedWrap();
1605 bool isNUW = AddInst->hasNoUnsignedWrap();
1607 const APInt Delta = *C1 - *C0;
1608 if (C0->isStrictlyPositive()) {
1610 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1611 return getFalse(ITy);
1612 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1613 return getFalse(ITy);
1616 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1617 return getFalse(ITy);
1618 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1619 return getFalse(ITy);
1622 if (C0->getBoolValue() && isNUW) {
1624 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1625 return getFalse(ITy);
1627 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1628 return getFalse(ITy);
1634 /// Given operands for an And, see if we can fold the result.
1635 /// If not, this returns null.
1636 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
1637 unsigned MaxRecurse) {
1638 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
1642 if (match(Op1, m_Undef()))
1643 return Constant::getNullValue(Op0->getType());
1650 if (match(Op1, m_Zero()))
1654 if (match(Op1, m_AllOnes()))
1657 // A & ~A = ~A & A = 0
1658 if (match(Op0, m_Not(m_Specific(Op1))) ||
1659 match(Op1, m_Not(m_Specific(Op0))))
1660 return Constant::getNullValue(Op0->getType());
1663 Value *A = nullptr, *B = nullptr;
1664 if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
1665 (A == Op1 || B == Op1))
1669 if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
1670 (A == Op0 || B == Op0))
1673 // A & (-A) = A if A is a power of two or zero.
1674 if (match(Op0, m_Neg(m_Specific(Op1))) ||
1675 match(Op1, m_Neg(m_Specific(Op0)))) {
1676 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
1679 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
1684 if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
1685 if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
1686 if (Value *V = SimplifyAndOfICmps(ICILHS, ICIRHS))
1688 if (Value *V = SimplifyAndOfICmps(ICIRHS, ICILHS))
1693 // The compares may be hidden behind casts. Look through those and try the
1694 // same folds as above.
1695 auto *Cast0 = dyn_cast<CastInst>(Op0);
1696 auto *Cast1 = dyn_cast<CastInst>(Op1);
1697 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1698 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1699 auto *Cmp0 = dyn_cast<ICmpInst>(Cast0->getOperand(0));
1700 auto *Cmp1 = dyn_cast<ICmpInst>(Cast1->getOperand(0));
1702 Instruction::CastOps CastOpc = Cast0->getOpcode();
1703 Type *ResultType = Cast0->getType();
1704 if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp0, Cmp1)))
1705 return ConstantExpr::getCast(CastOpc, V, ResultType);
1706 if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp1, Cmp0)))
1707 return ConstantExpr::getCast(CastOpc, V, ResultType);
1711 // Try some generic simplifications for associative operations.
1712 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
1716 // And distributes over Or. Try some generic simplifications based on this.
1717 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or,
1721 // And distributes over Xor. Try some generic simplifications based on this.
1722 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor,
1726 // If the operation is with the result of a select instruction, check whether
1727 // operating on either branch of the select always yields the same value.
1728 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1729 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q,
1733 // If the operation is with the result of a phi instruction, check whether
1734 // operating on all incoming values of the phi always yields the same value.
1735 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1736 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q,
1743 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout &DL,
1744 const TargetLibraryInfo *TLI,
1745 const DominatorTree *DT, AssumptionCache *AC,
1746 const Instruction *CxtI) {
1747 return ::SimplifyAndInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
1751 /// Commuted variants are assumed to be handled by calling this function again
1752 /// with the parameters swapped.
1753 static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1754 ICmpInst::Predicate Pred0, Pred1;
1756 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1757 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1760 // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
1761 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1762 // can eliminate Op0 from this 'or'.
1763 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1766 // Check for any combination of predicates that cover the entire range of possibilities.
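// For instance (illustrative operands), inverse predicates cover everything:
//   (icmp slt i32 %a, %b) | (icmp sge i32 %a, %b)  --> true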
1768 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1769 (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
1770 (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
1771 (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
1772 return getTrue(Op0->getType());
1777 /// Commuted variants are assumed to be handled by calling this function again
1778 /// with the parameters swapped.
1779 static Value *SimplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
1780 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
1783 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
1786 // (icmp (add V, C0), C1) | (icmp V, C0)
1787 ICmpInst::Predicate Pred0, Pred1;
1788 const APInt *C0, *C1;
1790 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1793 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1796 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1797 if (AddInst->getOperand(1) != Op1->getOperand(1))
1800 Type *ITy = Op0->getType();
1801 bool isNSW = AddInst->hasNoSignedWrap();
1802 bool isNUW = AddInst->hasNoUnsignedWrap();
1804 const APInt Delta = *C1 - *C0;
1805 if (C0->isStrictlyPositive()) {
1807 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1808 return getTrue(ITy);
1809 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1810 return getTrue(ITy);
1813 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1814 return getTrue(ITy);
1815 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1816 return getTrue(ITy);
1819 if (C0->getBoolValue() && isNUW) {
1821 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1822 return getTrue(ITy);
1824 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1825 return getTrue(ITy);
1831 /// Given operands for an Or, see if we can fold the result.
1832 /// If not, this returns null.
1833 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
1834 unsigned MaxRecurse) {
1835 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
1839 if (match(Op1, m_Undef()))
1840 return Constant::getAllOnesValue(Op0->getType());
1847 if (match(Op1, m_Zero()))
1851 if (match(Op1, m_AllOnes()))
1854 // A | ~A = ~A | A = -1
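// Illustrative IR for this fold (operand names are made up):
//   %na = xor i32 %a, -1
//   %r  = or i32 %a, %na        ; simplifies to i32 -1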
1855 if (match(Op0, m_Not(m_Specific(Op1))) ||
1856 match(Op1, m_Not(m_Specific(Op0))))
1857 return Constant::getAllOnesValue(Op0->getType());
1860 Value *A = nullptr, *B = nullptr;
1861 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1862 (A == Op1 || B == Op1))
1866 if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
1867 (A == Op0 || B == Op0))
1870 // ~(A & ?) | A = -1
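// Any bit cleared in A must be set in ~(A & ?), so the 'or' sets every bit.
// Illustrative IR (operand names are made up):
//   %and = and i32 %a, %b
//   %not = xor i32 %and, -1
//   %r   = or i32 %not, %a      ; simplifies to i32 -1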
1871 if (match(Op0, m_Not(m_And(m_Value(A), m_Value(B)))) &&
1872 (A == Op1 || B == Op1))
1873 return Constant::getAllOnesValue(Op1->getType());
1875 // A | ~(A & ?) = -1
1876 if (match(Op1, m_Not(m_And(m_Value(A), m_Value(B)))) &&
1877 (A == Op0 || B == Op0))
1878 return Constant::getAllOnesValue(Op0->getType());
1880 if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
1881 if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
1882 if (Value *V = SimplifyOrOfICmps(ICILHS, ICIRHS))
1884 if (Value *V = SimplifyOrOfICmps(ICIRHS, ICILHS))
1889 // Try some generic simplifications for associative operations.
1890 if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
1894 // Or distributes over And. Try some generic simplifications based on this.
1895 if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q,
1899 // If the operation is with the result of a select instruction, check whether
1900 // operating on either branch of the select always yields the same value.
1901 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1902 if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
1907 Value *C = nullptr, *D = nullptr;
1908 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
1909 match(Op1, m_And(m_Value(B), m_Value(D)))) {
1910 ConstantInt *C1 = dyn_cast<ConstantInt>(C);
1911 ConstantInt *C2 = dyn_cast<ConstantInt>(D);
1912 if (C1 && C2 && (C1->getValue() == ~C2->getValue())) {
1913 // (A & C1)|(B & C2)
1914 // If we have: ((V + N) & C1) | (V & C2)
1915 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
1916 // replace with V+N.
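// Illustrative IR, assuming C1 = -256, C2 = 255 and N = 256 (all made up):
//   %vn = add i32 %v, 256       ; N has no bits inside the C2 mask
//   %hi = and i32 %vn, -256     ; (V + N) & C1
//   %lo = and i32 %v, 255       ; V & C2
//   %r  = or i32 %hi, %lo       ; simplifies to %vn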
1918 if ((C2->getValue() & (C2->getValue() + 1)) == 0 && // C2 == 0+1+
1919 match(A, m_Add(m_Value(V1), m_Value(V2)))) {
1920 // Add commutes, try both ways.
1922 MaskedValueIsZero(V2, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1925 MaskedValueIsZero(V1, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1928 // Or commutes, try both ways.
1929 if ((C1->getValue() & (C1->getValue() + 1)) == 0 &&
1930 match(B, m_Add(m_Value(V1), m_Value(V2)))) {
1931 // Add commutes, try both ways.
1933 MaskedValueIsZero(V2, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1936 MaskedValueIsZero(V1, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1942 // If the operation is with the result of a phi instruction, check whether
1943 // operating on all incoming values of the phi always yields the same value.
1944 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1945 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
1951 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout &DL,
1952 const TargetLibraryInfo *TLI,
1953 const DominatorTree *DT, AssumptionCache *AC,
1954 const Instruction *CxtI) {
1955 return ::SimplifyOrInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
1959 /// Given operands for a Xor, see if we can fold the result.
1960 /// If not, this returns null.
1961 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
1962 unsigned MaxRecurse) {
1963 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
1966 // A ^ undef -> undef
1967 if (match(Op1, m_Undef()))
1971 if (match(Op1, m_Zero()))
1976 return Constant::getNullValue(Op0->getType());
1978 // A ^ ~A = ~A ^ A = -1
1979 if (match(Op0, m_Not(m_Specific(Op1))) ||
1980 match(Op1, m_Not(m_Specific(Op0))))
1981 return Constant::getAllOnesValue(Op0->getType());
1983 // Try some generic simplifications for associative operations.
1984 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q,
1988 // Threading Xor over selects and phi nodes is pointless, so don't bother.
1989 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
1990 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
1991 // only if B and C are equal. If B and C are equal then (since we assume
1992 // that operands have already been simplified) "select(cond, B, C)" should
1993 // have been simplified to the common value of B and C already. Analysing
1994 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
1995 // for threading over phi nodes.
2000 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout &DL,
2001 const TargetLibraryInfo *TLI,
2002 const DominatorTree *DT, AssumptionCache *AC,
2003 const Instruction *CxtI) {
2004 return ::SimplifyXorInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
2008 static Type *GetCompareTy(Value *Op) {
2009 return CmpInst::makeCmpResultType(Op->getType());
2012 /// Rummage around inside V looking for something equivalent to the comparison
2013 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
2014 /// Helper function for analyzing max/min idioms.
2015 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2016 Value *LHS, Value *RHS) {
2017 SelectInst *SI = dyn_cast<SelectInst>(V);
2020 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2023 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2024 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2026 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2027 LHS == CmpRHS && RHS == CmpLHS)
2032 // A significant optimization not implemented here is assuming that alloca
2033 // addresses are not equal to incoming argument values. They don't *alias*,
2034 // as we say, but that doesn't mean they aren't equal, so we take a
2035 // conservative approach.
2037 // This is inspired in part by C++11 5.10p1:
2038 // "Two pointers of the same type compare equal if and only if they are both
2039 // null, both point to the same function, or both represent the same address."
2042 // This is pretty permissive.
2044 // It's also partly due to C11 6.5.9p6:
2045 // "Two pointers compare equal if and only if both are null pointers, both are
2046 // pointers to the same object (including a pointer to an object and a
2047 // subobject at its beginning) or function, both are pointers to one past the
2048 // last element of the same array object, or one is a pointer to one past the
2049 // end of one array object and the other is a pointer to the start of a
2050 // different array object that happens to immediately follow the first array
2051 // object in the address space."
2053 // C11's version is more restrictive, however there's no reason why an argument
2054 // couldn't be a one-past-the-end value for a stack object in the caller and be
2055 // equal to the beginning of a stack object in the callee.
2057 // If the C and C++ standards are ever made sufficiently restrictive in this
2058 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2059 // this optimization.
2061 computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
2062 const DominatorTree *DT, CmpInst::Predicate Pred,
2063 const Instruction *CxtI, Value *LHS, Value *RHS) {
2064 // First, skip past any trivial no-ops.
2065 LHS = LHS->stripPointerCasts();
2066 RHS = RHS->stripPointerCasts();
2068 // A non-null pointer is not equal to a null pointer.
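// For example (illustrative), an alloca is known to be non-null:
//   %p = alloca i32
//   %c = icmp eq i32* %p, null  ; folds to false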
2069 if (llvm::isKnownNonNull(LHS) && isa<ConstantPointerNull>(RHS) &&
2070 (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
2071 return ConstantInt::get(GetCompareTy(LHS),
2072 !CmpInst::isTrueWhenEqual(Pred));
2074 // We can only fold certain predicates on pointer comparisons.
2079 // Equality comparisons are easy to fold.
2080 case CmpInst::ICMP_EQ:
2081 case CmpInst::ICMP_NE:
2084 // We can only handle unsigned relational comparisons because 'inbounds' on
2085 // a GEP only protects against unsigned wrapping.
2086 case CmpInst::ICMP_UGT:
2087 case CmpInst::ICMP_UGE:
2088 case CmpInst::ICMP_ULT:
2089 case CmpInst::ICMP_ULE:
2090 // However, we have to switch them to their signed variants to handle
2091 // negative indices from the base pointer.
2092 Pred = ICmpInst::getSignedPredicate(Pred);
2096 // Strip off any constant offsets so that we can reason about them.
2097 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2098 // here and compare base addresses like AliasAnalysis does, however there are
2099 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2100 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2101 // doesn't need to guarantee pointer inequality when it says NoAlias.
2102 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
2103 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
2105 // If LHS and RHS are related via constant offsets to the same base
2106 // value, we can replace it with an icmp which just compares the offsets.
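// Illustrative example, assuming a data layout where i32 occupies 4 bytes:
//   %p1 = getelementptr inbounds i32, i32* %base, i64 1   ; %base + 4
//   %p2 = getelementptr inbounds i32, i32* %base, i64 2   ; %base + 8
//   %c  = icmp eq i32* %p1, %p2     ; becomes icmp eq of the offsets -> false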
2108 return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
2110 // Various optimizations for (in)equality comparisons.
2111 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2112 // Different non-empty allocations that exist at the same time have
2113 // different addresses (if the program can tell). Global variables always
2114 // exist, so they always exist during the lifetime of each other and all
2115 // allocas. Two different allocas usually have different addresses...
2117 // However, if there's an @llvm.stackrestore dynamically in between two
2118 // allocas, they may have the same address. It's tempting to reduce the
2119 // scope of the problem by only looking at *static* allocas here. That would
2120 // cover the majority of allocas while significantly reducing the likelihood
2121 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2122 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2123 // an entry block. Also, if we have a block that's not attached to a
2124 // function, we can't tell if it's "static" under the current definition.
2125 // Theoretically, this problem could be fixed by creating a new kind of
2126 // instruction specifically for static allocas. Such a new instruction
2127 // could be required to be at the top of the entry block, thus preventing it
2128 // from being subject to a @llvm.stackrestore. Instcombine could even
2129 // convert regular allocas into these special allocas. It'd be nifty.
2130 // However, until then, this problem remains open.
2132 // So, we'll assume that two non-empty allocas have different addresses
2135 // With all that, if the offsets are within the bounds of their allocations
2136 // (and not one-past-the-end! so we can't use inbounds!), and their
2137 // allocations aren't the same, the pointers are not equal.
2139 // Note that it's not necessary to check for LHS being a global variable
2140 // address, due to canonicalization and constant folding.
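// Illustrative example (@g stands in for some global variable):
//   %a = alloca i32
//   %c = icmp eq i32* %a, @g    ; folds to false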
2141 if (isa<AllocaInst>(LHS) &&
2142 (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2143 ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
2144 ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
2145 uint64_t LHSSize, RHSSize;
2146 if (LHSOffsetCI && RHSOffsetCI &&
2147 getObjectSize(LHS, LHSSize, DL, TLI) &&
2148 getObjectSize(RHS, RHSSize, DL, TLI)) {
2149 const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
2150 const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2151 if (!LHSOffsetValue.isNegative() &&
2152 !RHSOffsetValue.isNegative() &&
2153 LHSOffsetValue.ult(LHSSize) &&
2154 RHSOffsetValue.ult(RHSSize)) {
2155 return ConstantInt::get(GetCompareTy(LHS),
2156 !CmpInst::isTrueWhenEqual(Pred));
2160 // Repeat the above check but this time without depending on DataLayout
2161 // or being able to compute a precise size.
2162 if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2163 !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2164 LHSOffset->isNullValue() &&
2165 RHSOffset->isNullValue())
2166 return ConstantInt::get(GetCompareTy(LHS),
2167 !CmpInst::isTrueWhenEqual(Pred));
2170 // Even if a non-inbounds GEP occurs along the path we can still optimize
2171 // equality comparisons concerning the result. We avoid walking the whole
2172 // chain again by starting where the last calls to
2173 // stripAndComputeConstantOffsets left off and accumulate the offsets.
2174 Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
2175 Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
2177 return ConstantExpr::getICmp(Pred,
2178 ConstantExpr::getAdd(LHSOffset, LHSNoBound),
2179 ConstantExpr::getAdd(RHSOffset, RHSNoBound));
2181 // If one side of the equality comparison must come from a noalias call
2182 // (meaning a system memory allocation function), and the other side must
2183 // come from a pointer that cannot overlap with dynamically-allocated
2184 // memory within the lifetime of the current function (allocas, byval
2185 // arguments, globals), then determine the comparison result here.
2186 SmallVector<Value *, 8> LHSUObjs, RHSUObjs;
2187 GetUnderlyingObjects(LHS, LHSUObjs, DL);
2188 GetUnderlyingObjects(RHS, RHSUObjs, DL);
2190 // Is the set of underlying objects all noalias calls?
2191 auto IsNAC = [](ArrayRef<Value *> Objects) {
2192 return all_of(Objects, isNoAliasCall);
2195 // Is the set of underlying objects all things which must be disjoint from
2196 // noalias calls? For allocas, we consider only static ones (dynamic
2197 // allocas might be transformed into calls to malloc not simultaneously
2198 // live with the compared-to allocation). For globals, we exclude symbols
2199 // that might be resolve lazily to symbols in another dynamically-loaded
2200 // library (and, thus, could be malloc'ed by the implementation).
2201 auto IsAllocDisjoint = [](ArrayRef<Value *> Objects) {
2202 return all_of(Objects, [](Value *V) {
2203 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2204 return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2205 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2206 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2207 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2208 !GV->isThreadLocal();
2209 if (const Argument *A = dyn_cast<Argument>(V))
2210 return A->hasByValAttr();
2215 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2216 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2217 return ConstantInt::get(GetCompareTy(LHS),
2218 !CmpInst::isTrueWhenEqual(Pred));
2220 // Fold comparisons for a non-escaping pointer even if the allocation call
2221 // cannot be elided. We cannot fold malloc comparison to null. Also, the
2222 // dynamic allocation call could be either of the operands.
2223 Value *MI = nullptr;
2224 if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT))
2226 else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonNullAt(LHS, CxtI, DT))
2228 // FIXME: We should also fold the compare when the pointer escapes, but the
2229 // compare dominates the pointer escape
2230 if (MI && !PointerMayBeCaptured(MI, true, true))
2231 return ConstantInt::get(GetCompareTy(LHS),
2232 CmpInst::isFalseWhenEqual(Pred));
2239 /// Fold an icmp when its operands have i1 scalar type.
2240 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2241 Value *RHS, const Query &Q) {
2242 Type *ITy = GetCompareTy(LHS); // The return type.
2243 Type *OpTy = LHS->getType(); // The operand type.
2244 if (!OpTy->getScalarType()->isIntegerTy(1))
2250 case ICmpInst::ICMP_EQ:
2252 if (match(RHS, m_One()))
2255 case ICmpInst::ICMP_NE:
2257 if (match(RHS, m_Zero()))
2260 case ICmpInst::ICMP_UGT:
2262 if (match(RHS, m_Zero()))
2265 case ICmpInst::ICMP_UGE:
2267 if (match(RHS, m_One()))
2269 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false))
2270 return getTrue(ITy);
2272 case ICmpInst::ICMP_SGE:
2273 /// For signed comparison, the values for an i1 are 0 and -1
2274 /// respectively. This maps into a truth table of:
2275 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2276 /// 0 | 0 | 1 (0 >= 0) | 1
2277 /// 0 | 1 | 1 (0 >= -1) | 1
2278 /// 1 | 0 | 0 (-1 >= 0) | 0
2279 /// 1 | 1 | 1 (-1 >= -1) | 1
2280 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2281 return getTrue(ITy);
2283 case ICmpInst::ICMP_SLT:
2285 if (match(RHS, m_Zero()))
2288 case ICmpInst::ICMP_SLE:
2290 if (match(RHS, m_One()))
2293 case ICmpInst::ICMP_ULE:
2294 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2295 return getTrue(ITy);
2302 /// Try hard to fold icmp with zero RHS because this is a common case.
2303 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2304 Value *RHS, const Query &Q) {
2305 if (!match(RHS, m_Zero()))
2308 Type *ITy = GetCompareTy(LHS); // The return type.
2309 bool LHSKnownNonNegative, LHSKnownNegative;
2312 llvm_unreachable("Unknown ICmp predicate!");
2313 case ICmpInst::ICMP_ULT:
2314 return getFalse(ITy);
2315 case ICmpInst::ICMP_UGE:
2316 return getTrue(ITy);
2317 case ICmpInst::ICMP_EQ:
2318 case ICmpInst::ICMP_ULE:
2319 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2320 return getFalse(ITy);
2322 case ICmpInst::ICMP_NE:
2323 case ICmpInst::ICMP_UGT:
2324 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2325 return getTrue(ITy);
2327 case ICmpInst::ICMP_SLT:
2328 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
2330 if (LHSKnownNegative)
2331 return getTrue(ITy);
2332 if (LHSKnownNonNegative)
2333 return getFalse(ITy);
2335 case ICmpInst::ICMP_SLE:
2336 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
2338 if (LHSKnownNegative)
2339 return getTrue(ITy);
2340 if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2341 return getFalse(ITy);
2343 case ICmpInst::ICMP_SGE:
2344 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
2346 if (LHSKnownNegative)
2347 return getFalse(ITy);
2348 if (LHSKnownNonNegative)
2349 return getTrue(ITy);
2351 case ICmpInst::ICMP_SGT:
2352 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
2354 if (LHSKnownNegative)
2355 return getFalse(ITy);
2356 if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2357 return getTrue(ITy);
2364 /// Many binary operators with a constant operand have an easy-to-compute
2365 /// range of outputs. This can be used to fold a comparison to always true or false.
2367 static void setLimitsForBinOp(BinaryOperator &BO, APInt &Lower, APInt &Upper) {
2368 unsigned Width = Lower.getBitWidth();
2370 switch (BO.getOpcode()) {
2371 case Instruction::Add:
2372 if (match(BO.getOperand(1), m_APInt(C)) && *C != 0) {
2373 // FIXME: If we have both nuw and nsw, we should reduce the range further.
2374 if (BO.hasNoUnsignedWrap()) {
2375 // 'add nuw x, C' produces [C, UINT_MAX].
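// e.g. (illustrative) 'add nuw i8 %x, 5' can only produce values in [5, 255].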
2377 } else if (BO.hasNoSignedWrap()) {
2378 if (C->isNegative()) {
2379 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
2380 Lower = APInt::getSignedMinValue(Width);
2381 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
2383 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
2384 Lower = APInt::getSignedMinValue(Width) + *C;
2385 Upper = APInt::getSignedMaxValue(Width) + 1;
2391 case Instruction::And:
2392 if (match(BO.getOperand(1), m_APInt(C)))
2393 // 'and x, C' produces [0, C].
2397 case Instruction::Or:
2398 if (match(BO.getOperand(1), m_APInt(C)))
2399 // 'or x, C' produces [C, UINT_MAX].
2403 case Instruction::AShr:
2404 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
2405 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
2406 Lower = APInt::getSignedMinValue(Width).ashr(*C);
2407 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
2408 } else if (match(BO.getOperand(0), m_APInt(C))) {
2409 unsigned ShiftAmount = Width - 1;
2410 if (*C != 0 && BO.isExact())
2411 ShiftAmount = C->countTrailingZeros();
2412 if (C->isNegative()) {
2413 // 'ashr C, x' produces [C, C >> (Width-1)]
2415 Upper = C->ashr(ShiftAmount) + 1;
2417 // 'ashr C, x' produces [C >> (Width-1), C]
2418 Lower = C->ashr(ShiftAmount);
2424 case Instruction::LShr:
2425 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
2426 // 'lshr x, C' produces [0, UINT_MAX >> C].
2427 Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
2428 } else if (match(BO.getOperand(0), m_APInt(C))) {
2429 // 'lshr C, x' produces [C >> (Width-1), C].
2430 unsigned ShiftAmount = Width - 1;
2431 if (*C != 0 && BO.isExact())
2432 ShiftAmount = C->countTrailingZeros();
2433 Lower = C->lshr(ShiftAmount);
2438 case Instruction::Shl:
2439 if (match(BO.getOperand(0), m_APInt(C))) {
2440 if (BO.hasNoUnsignedWrap()) {
2441 // 'shl nuw C, x' produces [C, C << CLZ(C)]
2443 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
2444 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
2445 if (C->isNegative()) {
2446 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
2447 unsigned ShiftAmount = C->countLeadingOnes() - 1;
2448 Lower = C->shl(ShiftAmount);
2451 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
2452 unsigned ShiftAmount = C->countLeadingZeros() - 1;
2454 Upper = C->shl(ShiftAmount) + 1;
2460 case Instruction::SDiv:
2461 if (match(BO.getOperand(1), m_APInt(C))) {
2462 APInt IntMin = APInt::getSignedMinValue(Width);
2463 APInt IntMax = APInt::getSignedMaxValue(Width);
2464 if (C->isAllOnesValue()) {
2465 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
2466 // (INT_MIN is excluded because INT_MIN / -1 would overflow)
2469 } else if (C->countLeadingZeros() < Width - 1) {
2470 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
2471 // where C != -1 and C != 0 and C != 1
2472 Lower = IntMin.sdiv(*C);
2473 Upper = IntMax.sdiv(*C);
2474 if (Lower.sgt(Upper))
2475 std::swap(Lower, Upper);
2477 assert(Upper != Lower && "Upper part of range has wrapped!");
2479 } else if (match(BO.getOperand(0), m_APInt(C))) {
2480 if (C->isMinSignedValue()) {
2481 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
2483 Upper = Lower.lshr(1) + 1;
2485 // 'sdiv C, x' produces [-|C|, |C|].
2486 Upper = C->abs() + 1;
2487 Lower = (-Upper) + 1;
2492 case Instruction::UDiv:
2493 if (match(BO.getOperand(1), m_APInt(C)) && *C != 0) {
2494 // 'udiv x, C' produces [0, UINT_MAX / C].
2495 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
2496 } else if (match(BO.getOperand(0), m_APInt(C))) {
2497 // 'udiv C, x' produces [0, C].
2502 case Instruction::SRem:
2503 if (match(BO.getOperand(1), m_APInt(C))) {
2504 // 'srem x, C' produces (-|C|, |C|).
2506 Lower = (-Upper) + 1;
2510 case Instruction::URem:
2511 if (match(BO.getOperand(1), m_APInt(C)))
2512 // 'urem x, C' produces [0, C).
2521 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
2524 if (!match(RHS, m_APInt(C)))
2527 // Rule out tautological comparisons (e.g., ult 0 or uge 0).
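// For example (illustrative): 'icmp ult i8 %x, 0' maps to an empty region and
// folds to false; 'icmp uge i8 %x, 0' maps to a full region and folds to true.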
2528 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
2529 if (RHS_CR.isEmptySet())
2530 return ConstantInt::getFalse(GetCompareTy(RHS));
2531 if (RHS_CR.isFullSet())
2532 return ConstantInt::getTrue(GetCompareTy(RHS));
2534 // Find the range of possible values for binary operators.
2535 unsigned Width = C->getBitWidth();
2536 APInt Lower = APInt(Width, 0);
2537 APInt Upper = APInt(Width, 0);
2538 if (auto *BO = dyn_cast<BinaryOperator>(LHS))
2539 setLimitsForBinOp(*BO, Lower, Upper);
2541 ConstantRange LHS_CR =
2542 Lower != Upper ? ConstantRange(Lower, Upper) : ConstantRange(Width, true);
2544 if (auto *I = dyn_cast<Instruction>(LHS))
2545 if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
2546 LHS_CR = LHS_CR.intersectWith(getConstantRangeFromMetadata(*Ranges));
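// For example (illustrative), if LHS is a load annotated with !range metadata
// restricting it to [0, 10), then an 'icmp ult' of it against 10 folds to true.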
2548 if (!LHS_CR.isFullSet()) {
2549 if (RHS_CR.contains(LHS_CR))
2550 return ConstantInt::getTrue(GetCompareTy(RHS));
2551 if (RHS_CR.inverse().contains(LHS_CR))
2552 return ConstantInt::getFalse(GetCompareTy(RHS));
2558 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
2559 Value *RHS, const Query &Q,
2560 unsigned MaxRecurse) {
2561 Type *ITy = GetCompareTy(LHS); // The return type.
2563 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
2564 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
2565 if (MaxRecurse && (LBO || RBO)) {
2566 // Analyze the case when either LHS or RHS is an add instruction.
2567 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2568 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
2569 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
2570 if (LBO && LBO->getOpcode() == Instruction::Add) {
2571 A = LBO->getOperand(0);
2572 B = LBO->getOperand(1);
2574 ICmpInst::isEquality(Pred) ||
2575 (CmpInst::isUnsigned(Pred) && LBO->hasNoUnsignedWrap()) ||
2576 (CmpInst::isSigned(Pred) && LBO->hasNoSignedWrap());
2578 if (RBO && RBO->getOpcode() == Instruction::Add) {
2579 C = RBO->getOperand(0);
2580 D = RBO->getOperand(1);
2582 ICmpInst::isEquality(Pred) ||
2583 (CmpInst::isUnsigned(Pred) && RBO->hasNoUnsignedWrap()) ||
2584 (CmpInst::isSigned(Pred) && RBO->hasNoSignedWrap());
2587 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
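// e.g. (illustrative) 'icmp eq (add i32 %x, %y), %x' becomes 'icmp eq i32 %y, 0'.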
2588 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
2589 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
2590 Constant::getNullValue(RHS->getType()), Q,
2594 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
2595 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
2597 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
2598 C == LHS ? D : C, Q, MaxRecurse - 1))
2601 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
2602 if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem &&
2604 // Determine Y and Z in the form icmp (X+Y), (X+Z).
2607 // C + B == C + D -> B == D
2610 } else if (A == D) {
2611 // D + B == C + D -> B == C
2614 } else if (B == C) {
2615 // A + C == C + D -> A == D
2620 // A + D == C + D -> A == C
2624 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
2631 // icmp pred (or X, Y), X
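// Or'ing can only set bits, so (or X, Y) is always unsigned >= X; for example
// (illustrative) 'icmp uge (or i32 %x, %y), %x' folds to true.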
2632 if (LBO && match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
2633 if (Pred == ICmpInst::ICMP_ULT)
2634 return getFalse(ITy);
2635 if (Pred == ICmpInst::ICMP_UGE)
2636 return getTrue(ITy);
2638 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
2639 bool RHSKnownNonNegative, RHSKnownNegative;
2640 bool YKnownNonNegative, YKnownNegative;
2641 ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, Q.DL, 0,
2642 Q.AC, Q.CxtI, Q.DT);
2643 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Q.DL, 0, Q.AC,
2645 if (RHSKnownNonNegative && YKnownNegative)
2646 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
2647 if (RHSKnownNegative || YKnownNonNegative)
2648 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
2651 // icmp pred X, (or X, Y)
2652 if (RBO && match(RBO, m_c_Or(m_Value(Y), m_Specific(LHS)))) {
2653 if (Pred == ICmpInst::ICMP_ULE)
2654 return getTrue(ITy);
2655 if (Pred == ICmpInst::ICMP_UGT)
2656 return getFalse(ITy);
2658 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE) {
2659 bool LHSKnownNonNegative, LHSKnownNegative;
2660 bool YKnownNonNegative, YKnownNegative;
2661 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0,
2662 Q.AC, Q.CxtI, Q.DT);
2663 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Q.DL, 0, Q.AC,
2665 if (LHSKnownNonNegative && YKnownNegative)
2666 return Pred == ICmpInst::ICMP_SGT ? getTrue(ITy) : getFalse(ITy);
2667 if (LHSKnownNegative || YKnownNonNegative)
2668 return Pred == ICmpInst::ICMP_SGT ? getFalse(ITy) : getTrue(ITy);
2673 // icmp pred (and X, Y), X
2674 if (LBO && match(LBO, m_CombineOr(m_And(m_Value(), m_Specific(RHS)),
2675 m_And(m_Specific(RHS), m_Value())))) {
2676 if (Pred == ICmpInst::ICMP_UGT)
2677 return getFalse(ITy);
2678 if (Pred == ICmpInst::ICMP_ULE)
2679 return getTrue(ITy);
2681 // icmp pred X, (and X, Y)
2682 if (RBO && match(RBO, m_CombineOr(m_And(m_Value(), m_Specific(LHS)),
2683 m_And(m_Specific(LHS), m_Value())))) {
2684 if (Pred == ICmpInst::ICMP_UGE)
2685 return getTrue(ITy);
2686 if (Pred == ICmpInst::ICMP_ULT)
2687 return getFalse(ITy);
2690 // 0 - (zext X) pred C
2691 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
2692 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
2693 if (RHSC->getValue().isStrictlyPositive()) {
2694 if (Pred == ICmpInst::ICMP_SLT)
2695 return ConstantInt::getTrue(RHSC->getContext());
2696 if (Pred == ICmpInst::ICMP_SGE)
2697 return ConstantInt::getFalse(RHSC->getContext());
2698 if (Pred == ICmpInst::ICMP_EQ)
2699 return ConstantInt::getFalse(RHSC->getContext());
2700 if (Pred == ICmpInst::ICMP_NE)
2701 return ConstantInt::getTrue(RHSC->getContext());
2703 if (RHSC->getValue().isNonNegative()) {
2704 if (Pred == ICmpInst::ICMP_SLE)
2705 return ConstantInt::getTrue(RHSC->getContext());
2706 if (Pred == ICmpInst::ICMP_SGT)
2707 return ConstantInt::getFalse(RHSC->getContext());
2712 // icmp pred (urem X, Y), Y
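// The remainder is always unsigned-less-than the divisor (division by zero is
// undefined), e.g. (illustrative) 'icmp ult (urem i32 %x, %y), %y' folds to
// true below.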
2713 if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
2714 bool KnownNonNegative, KnownNegative;
2718 case ICmpInst::ICMP_SGT:
2719 case ICmpInst::ICMP_SGE:
2720 ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
2722 if (!KnownNonNegative)
2725 case ICmpInst::ICMP_EQ:
2726 case ICmpInst::ICMP_UGT:
2727 case ICmpInst::ICMP_UGE:
2728 return getFalse(ITy);
2729 case ICmpInst::ICMP_SLT:
2730 case ICmpInst::ICMP_SLE:
2731 ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
2733 if (!KnownNonNegative)
2736 case ICmpInst::ICMP_NE:
2737 case ICmpInst::ICMP_ULT:
2738 case ICmpInst::ICMP_ULE:
2739 return getTrue(ITy);
2743 // icmp pred X, (urem Y, X)
2744 if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) {
2745 bool KnownNonNegative, KnownNegative;
2749 case ICmpInst::ICMP_SGT:
2750 case ICmpInst::ICMP_SGE:
2751 ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
2753 if (!KnownNonNegative)
2756 case ICmpInst::ICMP_NE:
2757 case ICmpInst::ICMP_UGT:
2758 case ICmpInst::ICMP_UGE:
2759 return getTrue(ITy);
2760 case ICmpInst::ICMP_SLT:
2761 case ICmpInst::ICMP_SLE:
2762 ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
2764 if (!KnownNonNegative)
2767 case ICmpInst::ICMP_EQ:
2768 case ICmpInst::ICMP_ULT:
2769 case ICmpInst::ICMP_ULE:
2770 return getFalse(ITy);
2776 if (LBO && (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
2777 match(LBO, m_UDiv(m_Specific(RHS), m_Value())))) {
2778 // icmp pred (X op Y), X
2779 if (Pred == ICmpInst::ICMP_UGT)
2780 return getFalse(ITy);
2781 if (Pred == ICmpInst::ICMP_ULE)
2782 return getTrue(ITy);
2787 if (RBO && (match(RBO, m_LShr(m_Specific(LHS), m_Value())) ||
2788 match(RBO, m_UDiv(m_Specific(LHS), m_Value())))) {
2789 // icmp pred X, (X op Y)
2790 if (Pred == ICmpInst::ICMP_ULT)
2791 return getFalse(ITy);
2792 if (Pred == ICmpInst::ICMP_UGE)
2793 return getTrue(ITy);
2800 // where CI2 is a power of 2 and CI isn't
2801 if (auto *CI = dyn_cast<ConstantInt>(RHS)) {
2802 const APInt *CI2Val, *CIVal = &CI->getValue();
2803 if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) &&
2804 CI2Val->isPowerOf2()) {
2805 if (!CIVal->isPowerOf2()) {
2806 // CI2 << X can equal zero in some circumstances,
2807 // this simplification is unsafe if CI is zero.
2809 // We know it is safe if:
2810 // - The shift is nsw, we can't shift out the one bit.
2811 // - The shift is nuw, we can't shift out the one bit.
2814 if (LBO->hasNoSignedWrap() || LBO->hasNoUnsignedWrap() ||
2815 *CI2Val == 1 || !CI->isZero()) {
2816 if (Pred == ICmpInst::ICMP_EQ)
2817 return ConstantInt::getFalse(RHS->getContext());
2818 if (Pred == ICmpInst::ICMP_NE)
2819 return ConstantInt::getTrue(RHS->getContext());
2822 if (CIVal->isSignMask() && *CI2Val == 1) {
2823 if (Pred == ICmpInst::ICMP_UGT)
2824 return ConstantInt::getFalse(RHS->getContext());
2825 if (Pred == ICmpInst::ICMP_ULE)
2826 return ConstantInt::getTrue(RHS->getContext());
2831 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
2832 LBO->getOperand(1) == RBO->getOperand(1)) {
2833 switch (LBO->getOpcode()) {
2836 case Instruction::UDiv:
2837 case Instruction::LShr:
2838 if (ICmpInst::isSigned(Pred))
2841 case Instruction::SDiv:
2842 case Instruction::AShr:
2843 if (!LBO->isExact() || !RBO->isExact())
2845 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2846 RBO->getOperand(0), Q, MaxRecurse - 1))
2849 case Instruction::Shl: {
2850 bool NUW = LBO->hasNoUnsignedWrap() && RBO->hasNoUnsignedWrap();
2851 bool NSW = LBO->hasNoSignedWrap() && RBO->hasNoSignedWrap();
2854 if (!NSW && ICmpInst::isSigned(Pred))
2856 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2857 RBO->getOperand(0), Q, MaxRecurse - 1))
2866 /// Simplify integer comparisons where at least one operand of the compare
2867 /// matches an integer min/max idiom.
2868 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
2869 Value *RHS, const Query &Q,
2870 unsigned MaxRecurse) {
2871 Type *ITy = GetCompareTy(LHS); // The return type.
2873 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
2874 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
2876 // Signed variants on "max(a,b)>=a -> true".
2877 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
2879 std::swap(A, B); // smax(A, B) pred A.
2880 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
2881 // We analyze this as smax(A, B) pred A.
2883 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
2884 (A == LHS || B == LHS)) {
2886 std::swap(A, B); // A pred smax(A, B).
2887 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
2888 // We analyze this as smax(A, B) swapped-pred A.
2889 P = CmpInst::getSwappedPredicate(Pred);
2890 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
2891 (A == RHS || B == RHS)) {
2893 std::swap(A, B); // smin(A, B) pred A.
2894 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
2895 // We analyze this as smax(-A, -B) swapped-pred -A.
2896 // Note that we do not need to actually form -A or -B thanks to EqP.
2897 P = CmpInst::getSwappedPredicate(Pred);
2898 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
2899 (A == LHS || B == LHS)) {
2901 std::swap(A, B); // A pred smin(A, B).
2902 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
2903 // We analyze this as smax(-A, -B) pred -A.
2904 // Note that we do not need to actually form -A or -B thanks to EqP.
2907 if (P != CmpInst::BAD_ICMP_PREDICATE) {
2908 // Cases correspond to "max(A, B) p A".
2912 case CmpInst::ICMP_EQ:
2913 case CmpInst::ICMP_SLE:
2914 // Equivalent to "A EqP B". This may be the same as the condition tested
2915 // in the max/min; if so, we can just return that.
2916 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
2918 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
2920 // Otherwise, see if "A EqP B" simplifies.
2922 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
2925 case CmpInst::ICMP_NE:
2926 case CmpInst::ICMP_SGT: {
2927 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
2928 // Equivalent to "A InvEqP B". This may be the same as the condition
2929 // tested in the max/min; if so, we can just return that.
2930 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
2932 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
2934 // Otherwise, see if "A InvEqP B" simplifies.
2936 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
2940 case CmpInst::ICMP_SGE:
2942 return getTrue(ITy);
2943 case CmpInst::ICMP_SLT:
2945 return getFalse(ITy);
2949 // Unsigned variants on "max(a,b)>=a -> true".
2950 P = CmpInst::BAD_ICMP_PREDICATE;
2951 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
2953 std::swap(A, B); // umax(A, B) pred A.
2954 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
2955 // We analyze this as umax(A, B) pred A.
2957 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
2958 (A == LHS || B == LHS)) {
2960 std::swap(A, B); // A pred umax(A, B).
2961 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
2962 // We analyze this as umax(A, B) swapped-pred A.
2963 P = CmpInst::getSwappedPredicate(Pred);
2964 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
2965 (A == RHS || B == RHS)) {
2967 std::swap(A, B); // umin(A, B) pred A.
2968 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
2969 // We analyze this as umax(-A, -B) swapped-pred -A.
2970 // Note that we do not need to actually form -A or -B thanks to EqP.
2971 P = CmpInst::getSwappedPredicate(Pred);
2972 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
2973 (A == LHS || B == LHS)) {
2975 std::swap(A, B); // A pred umin(A, B).
2976 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
2977 // We analyze this as umax(-A, -B) pred -A.
2978 // Note that we do not need to actually form -A or -B thanks to EqP.
2981 if (P != CmpInst::BAD_ICMP_PREDICATE) {
2982 // Cases correspond to "max(A, B) p A".
2986 case CmpInst::ICMP_EQ:
2987 case CmpInst::ICMP_ULE:
2988 // Equivalent to "A EqP B". This may be the same as the condition tested
2989 // in the max/min; if so, we can just return that.
2990 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
2992 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
2994 // Otherwise, see if "A EqP B" simplifies.
2996 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
2999 case CmpInst::ICMP_NE:
3000 case CmpInst::ICMP_UGT: {
3001 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3002 // Equivalent to "A InvEqP B". This may be the same as the condition
3003 // tested in the max/min; if so, we can just return that.
3004 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
3006 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
3008 // Otherwise, see if "A InvEqP B" simplifies.
3010 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3014 case CmpInst::ICMP_UGE:
3016 return getTrue(ITy);
3017 case CmpInst::ICMP_ULT:
3019 return getFalse(ITy);
3023 // Variants on "max(x,y) >= min(x,z)".
3025 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3026 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3027 (A == C || A == D || B == C || B == D)) {
3028 // max(x, ?) pred min(x, ?).
3029 if (Pred == CmpInst::ICMP_SGE)
3031 return getTrue(ITy);
3032 if (Pred == CmpInst::ICMP_SLT)
3034 return getFalse(ITy);
3035 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3036 match(RHS, m_SMax(m_Value(C), m_Value(D))) &&
3037 (A == C || A == D || B == C || B == D)) {
3038 // min(x, ?) pred max(x, ?).
3039 if (Pred == CmpInst::ICMP_SLE)
3041 return getTrue(ITy);
3042 if (Pred == CmpInst::ICMP_SGT)
3044 return getFalse(ITy);
3045 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3046 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3047 (A == C || A == D || B == C || B == D)) {
3048 // max(x, ?) pred min(x, ?).
3049 if (Pred == CmpInst::ICMP_UGE)
3051 return getTrue(ITy);
3052 if (Pred == CmpInst::ICMP_ULT)
3054 return getFalse(ITy);
3055 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3056 match(RHS, m_UMax(m_Value(C), m_Value(D))) &&
3057 (A == C || A == D || B == C || B == D)) {
3058 // min(x, ?) pred max(x, ?).
3059 if (Pred == CmpInst::ICMP_ULE)
3061 return getTrue(ITy);
3062 if (Pred == CmpInst::ICMP_UGT)
3064 return getFalse(ITy);
3070 /// Given operands for an ICmpInst, see if we can fold the result.
3071 /// If not, this returns null.
3072 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3073 const Query &Q, unsigned MaxRecurse) {
3074 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3075 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3077 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3078 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3079 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3081 // If we have a constant, make sure it is on the RHS.
3082 std::swap(LHS, RHS);
3083 Pred = CmpInst::getSwappedPredicate(Pred);
3086 Type *ITy = GetCompareTy(LHS); // The return type.
3088 // icmp X, X -> true/false
3089 // X icmp undef -> true/false. For example, icmp ugt %X, undef -> false
3090 // because X could be 0.
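// e.g. (illustrative) 'icmp ule i32 %x, %x' folds to true and
// 'icmp slt i32 %x, %x' folds to false.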
3091 if (LHS == RHS || isa<UndefValue>(RHS))
3092 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3094 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3097 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3100 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS))
3103 // If both operands have range metadata, use the metadata
3104 // to simplify the comparison.
3105 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3106 auto RHS_Instr = cast<Instruction>(RHS);
3107 auto LHS_Instr = cast<Instruction>(LHS);
3109 if (RHS_Instr->getMetadata(LLVMContext::MD_range) &&
3110 LHS_Instr->getMetadata(LLVMContext::MD_range)) {
3111 auto RHS_CR = getConstantRangeFromMetadata(
3112 *RHS_Instr->getMetadata(LLVMContext::MD_range));
3113 auto LHS_CR = getConstantRangeFromMetadata(
3114 *LHS_Instr->getMetadata(LLVMContext::MD_range));
3116 auto Satisfied_CR = ConstantRange::makeSatisfyingICmpRegion(Pred, RHS_CR);
3117 if (Satisfied_CR.contains(LHS_CR))
3118 return ConstantInt::getTrue(RHS->getContext());
3120 auto InversedSatisfied_CR = ConstantRange::makeSatisfyingICmpRegion(
3121 CmpInst::getInversePredicate(Pred), RHS_CR);
3122 if (InversedSatisfied_CR.contains(LHS_CR))
3123 return ConstantInt::getFalse(RHS->getContext());
3127 // Compare of cast, for example (zext X) != 0 -> X != 0
3128 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3129 Instruction *LI = cast<CastInst>(LHS);
3130 Value *SrcOp = LI->getOperand(0);
3131 Type *SrcTy = SrcOp->getType();
3132 Type *DstTy = LI->getType();
3134 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3135 // if the integer type is the same size as the pointer type.
3136 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3137 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3138 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3139 // Transfer the cast to the constant.
3140 if (Value *V = SimplifyICmpInst(Pred, SrcOp,
3141 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3144 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3145 if (RI->getOperand(0)->getType() == SrcTy)
3146 // Compare without the cast.
3147 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3153 if (isa<ZExtInst>(LHS)) {
3154 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3156 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3157 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3158 // Compare X and Y. Note that signed predicates become unsigned.
3159 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3160 SrcOp, RI->getOperand(0), Q,
3164 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3165 // too. If not, then try to deduce the result of the comparison.
3166 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3167 // Compute the constant that would result if we truncated to SrcTy and then
3168 // re-extended to DstTy.
3169 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3170 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3172 // If the re-extended constant didn't change then this is effectively
3173 // also a case of comparing two zero-extended values.
3174 if (RExt == CI && MaxRecurse)
3175 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3176 SrcOp, Trunc, Q, MaxRecurse-1))
3179 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3180 // there. Use this to work out the result of the comparison.
3183 default: llvm_unreachable("Unknown ICmp predicate!");
3185 case ICmpInst::ICMP_EQ:
3186 case ICmpInst::ICMP_UGT:
3187 case ICmpInst::ICMP_UGE:
3188 return ConstantInt::getFalse(CI->getContext());
3190 case ICmpInst::ICMP_NE:
3191 case ICmpInst::ICMP_ULT:
3192 case ICmpInst::ICMP_ULE:
3193 return ConstantInt::getTrue(CI->getContext());
3195 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3196 // is non-negative then LHS <s RHS.
3197 case ICmpInst::ICMP_SGT:
3198 case ICmpInst::ICMP_SGE:
3199 return CI->getValue().isNegative() ?
3200 ConstantInt::getTrue(CI->getContext()) :
3201 ConstantInt::getFalse(CI->getContext());
3203 case ICmpInst::ICMP_SLT:
3204 case ICmpInst::ICMP_SLE:
3205 return CI->getValue().isNegative() ?
3206 ConstantInt::getFalse(CI->getContext()) :
3207 ConstantInt::getTrue(CI->getContext());
3213 if (isa<SExtInst>(LHS)) {
3214 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3216 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3217 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3218 // Compare X and Y. Note that the predicate does not change.
3219 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3223 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3224 // too. If not, then try to deduce the result of the comparison.
3225 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3226 // Compute the constant that would result if we truncated to SrcTy and then
3227 // re-extended to DstTy.
3228 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3229 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3231 // If the re-extended constant didn't change then this is effectively
3232 // also a case of comparing two sign-extended values.
3233 if (RExt == CI && MaxRecurse)
3234 if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
3237 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3238 // bits there. Use this to work out the result of the comparison.
3241 default: llvm_unreachable("Unknown ICmp predicate!");
3242 case ICmpInst::ICMP_EQ:
3243 return ConstantInt::getFalse(CI->getContext());
3244 case ICmpInst::ICMP_NE:
3245 return ConstantInt::getTrue(CI->getContext());
3247 // If RHS is non-negative then LHS <s RHS. If RHS is negative then LHS >s RHS.
3249 case ICmpInst::ICMP_SGT:
3250 case ICmpInst::ICMP_SGE:
3251 return CI->getValue().isNegative() ?
3252 ConstantInt::getTrue(CI->getContext()) :
3253 ConstantInt::getFalse(CI->getContext());
3254 case ICmpInst::ICMP_SLT:
3255 case ICmpInst::ICMP_SLE:
3256 return CI->getValue().isNegative() ?
3257 ConstantInt::getFalse(CI->getContext()) :
3258 ConstantInt::getTrue(CI->getContext());
3260 // If LHS is non-negative then LHS <u RHS. If LHS is negative then LHS >u RHS.
3262 case ICmpInst::ICMP_UGT:
3263 case ICmpInst::ICMP_UGE:
3264 // Comparison is true iff the LHS <s 0.
3266 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3267 Constant::getNullValue(SrcTy),
3271 case ICmpInst::ICMP_ULT:
3272 case ICmpInst::ICMP_ULE:
3273 // Comparison is true iff the LHS >=s 0.
3275 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3276 Constant::getNullValue(SrcTy),
3286 // icmp eq|ne X, Y -> false|true if X != Y
3287 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
3288 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT)) {
3289 LLVMContext &Ctx = LHS->getType()->getContext();
3290 return Pred == ICmpInst::ICMP_NE ?
3291 ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
3294 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3297 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3300 // Simplify comparisons of related pointers using a powerful, recursive
3301 // GEP-walk when we have target data available.
3302 if (LHS->getType()->isPointerTy())
3303 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI, LHS, RHS))
3305 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3306 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3307 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
3308 Q.DL.getTypeSizeInBits(CLHS->getType()) &&
3309 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
3310 Q.DL.getTypeSizeInBits(CRHS->getType()))
3311 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI,
3312 CLHS->getPointerOperand(),
3313 CRHS->getPointerOperand()))
3316 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
3317 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
3318 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
3319 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
3320 (ICmpInst::isEquality(Pred) ||
3321 (GLHS->isInBounds() && GRHS->isInBounds() &&
3322 Pred == ICmpInst::getSignedPredicate(Pred)))) {
3323 // The bases are equal and the indices are constant. Build a constant
3324 // expression GEP with the same indices and a null base pointer to see
3325 // what constant folding can make out of it.
3326 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
3327 SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
3328 Constant *NewLHS = ConstantExpr::getGetElementPtr(
3329 GLHS->getSourceElementType(), Null, IndicesLHS);
3331 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
3332 Constant *NewRHS = ConstantExpr::getGetElementPtr(
3333 GLHS->getSourceElementType(), Null, IndicesRHS);
3334 return ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
3339 // If a bit is known to be zero for A and known to be one for B,
3340 // then A and B cannot be equal.
3341 if (ICmpInst::isEquality(Pred)) {
3342 const APInt *RHSVal;
3343 if (match(RHS, m_APInt(RHSVal))) {
3344 unsigned BitWidth = RHSVal->getBitWidth();
3345 APInt LHSKnownZero(BitWidth, 0);
3346 APInt LHSKnownOne(BitWidth, 0);
3347 computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, Q.DL, /*Depth=*/0, Q.AC,
3349 if (((LHSKnownZero & *RHSVal) != 0) || ((LHSKnownOne & ~(*RHSVal)) != 0))
3350 return Pred == ICmpInst::ICMP_EQ ? ConstantInt::getFalse(ITy)
3351 : ConstantInt::getTrue(ITy);
3355 // If the comparison is with the result of a select instruction, check whether
3356 // comparing with either branch of the select always yields the same value.
3357 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3358 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3361 // If the comparison is with the result of a phi instruction, check whether
3362 // doing the compare with each incoming phi value yields a common result.
3363 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3364 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3370 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3371 const DataLayout &DL,
3372 const TargetLibraryInfo *TLI,
3373 const DominatorTree *DT, AssumptionCache *AC,
3374 const Instruction *CxtI) {
3375 return ::SimplifyICmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
3379 /// Given operands for an FCmpInst, see if we can fold the result.
3380 /// If not, this returns null.
3381 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3382 FastMathFlags FMF, const Query &Q,
3383 unsigned MaxRecurse) {
3384 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3385 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
3387 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3388 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3389 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3391 // If we have a constant, make sure it is on the RHS.
3392 std::swap(LHS, RHS);
3393 Pred = CmpInst::getSwappedPredicate(Pred);
3396 // Fold trivial predicates.
3397 Type *RetTy = GetCompareTy(LHS);
3398 if (Pred == FCmpInst::FCMP_FALSE)
3399 return getFalse(RetTy);
3400 if (Pred == FCmpInst::FCMP_TRUE)
3401 return getTrue(RetTy);
3403 // UNO/ORD predicates can be trivially folded if NaNs are ignored.
3405 if (Pred == FCmpInst::FCMP_UNO)
3406 return getFalse(RetTy);
3407 if (Pred == FCmpInst::FCMP_ORD)
3408 return getTrue(RetTy);
3411 // fcmp pred x, undef and fcmp pred undef, x
3412 // fold to true if unordered, false if ordered
3413 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) {
3414 // Choosing NaN for the undef will always make unordered comparison succeed
3415 // and ordered comparison fail.
3416 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3419 // fcmp x,x -> true/false. Not all compares are foldable.
3421 if (CmpInst::isTrueWhenEqual(Pred))
3422 return getTrue(RetTy);
3423 if (CmpInst::isFalseWhenEqual(Pred))
3424 return getFalse(RetTy);
3427 // Handle fcmp with constant RHS
3428 const ConstantFP *CFP = nullptr;
3429 if (const auto *RHSC = dyn_cast<Constant>(RHS)) {
3430 if (RHS->getType()->isVectorTy())
3431 CFP = dyn_cast_or_null<ConstantFP>(RHSC->getSplatValue());
3433 CFP = dyn_cast<ConstantFP>(RHSC);
3436 // If the constant is a nan, see if we can fold the comparison based on it.
3437 if (CFP->getValueAPF().isNaN()) {
3438 if (FCmpInst::isOrdered(Pred)) // True "if ordered and foo"
3439 return getFalse(RetTy);
3440 assert(FCmpInst::isUnordered(Pred) &&
3441 "Comparison must be either ordered or unordered!");
3442 // True if unordered.
3443 return getTrue(RetTy);
3445 // Check whether the constant is an infinity.
3446 if (CFP->getValueAPF().isInfinity()) {
3447 if (CFP->getValueAPF().isNegative()) {
3449 case FCmpInst::FCMP_OLT:
3450 // No value is ordered and less than negative infinity.
3451 return getFalse(RetTy);
3452 case FCmpInst::FCMP_UGE:
3453 // All values are unordered with, or at least, negative infinity.
3454 return getTrue(RetTy);
3460 case FCmpInst::FCMP_OGT:
3461 // No value is ordered and greater than infinity.
3462 return getFalse(RetTy);
3463 case FCmpInst::FCMP_ULE:
3464 // All values are unordered with, or at most, infinity.
3465 return getTrue(RetTy);
3471 if (CFP->getValueAPF().isZero()) {
3473 case FCmpInst::FCMP_UGE:
3474 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3475 return getTrue(RetTy);
3477 case FCmpInst::FCMP_OLT:
3479 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3480 return getFalse(RetTy);
3488 // If the comparison is with the result of a select instruction, check whether
3489 // comparing with either branch of the select always yields the same value.
3490 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3491 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3494 // If the comparison is with the result of a phi instruction, check whether
3495 // doing the compare with each incoming phi value yields a common result.
3496 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
    if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
      return V;

  return nullptr;
}
3503 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3504 FastMathFlags FMF, const DataLayout &DL,
3505 const TargetLibraryInfo *TLI,
3506 const DominatorTree *DT, AssumptionCache *AC,
3507 const Instruction *CxtI) {
3508 return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF,
                            Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
3512 /// See if V simplifies when its operand Op is replaced with RepOp.
static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
                                           const Query &Q,
                                           unsigned MaxRecurse) {
  // Trivial replacement.
  if (V == Op)
    return RepOp;

  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return nullptr;
3524 // If this is a binary operator, try to simplify it with the replaced op.
3525 if (auto *B = dyn_cast<BinaryOperator>(I)) {
3527 // %cmp = icmp eq i32 %x, 2147483647
3528 // %add = add nsw i32 %x, 1
3529 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
3531 // We can't replace %sel with %add unless we strip away the flags.
3532 if (isa<OverflowingBinaryOperator>(B))
      if (B->hasNoSignedWrap() || B->hasNoUnsignedWrap())
        return nullptr;
    if (isa<PossiblyExactOperator>(B))
      if (B->isExact())
        return nullptr;

    if (MaxRecurse) {
3540 if (B->getOperand(0) == Op)
        return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q,
                             MaxRecurse - 1);
3543 if (B->getOperand(1) == Op)
        return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q,
                             MaxRecurse - 1);
    }
  }
3549 // Same for CmpInsts.
  if (CmpInst *C = dyn_cast<CmpInst>(I)) {
    if (MaxRecurse) {
3552 if (C->getOperand(0) == Op)
        return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q,
                               MaxRecurse - 1);
3555 if (C->getOperand(1) == Op)
        return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q,
                               MaxRecurse - 1);
    }
  }
3561 // TODO: We could hand off more cases to instsimplify here.
3563 // If all operands are constant after substituting Op for RepOp then we can
3564 // constant fold the instruction.
3565 if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) {
3566 // Build a list of all constant operands.
3567 SmallVector<Constant *, 8> ConstOps;
3568 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3569 if (I->getOperand(i) == Op)
3570 ConstOps.push_back(CRepOp);
3571 else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i)))
        ConstOps.push_back(COp);
      else
        break;
    }
3577 // All operands were constants, fold it.
3578 if (ConstOps.size() == I->getNumOperands()) {
3579 if (CmpInst *C = dyn_cast<CmpInst>(I))
3580 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
3581 ConstOps[1], Q.DL, Q.TLI);
3583 if (LoadInst *LI = dyn_cast<LoadInst>(I))
3584 if (!LI->isVolatile())
3585 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL);
      return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
    }
  }

  return nullptr;
}
3594 /// Try to simplify a select instruction when its condition operand is an
3595 /// integer comparison where one operand of the compare is a constant.
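/// For example (illustrative): "(X & 4) == 0 ? (X & ~4) : X" simplifies to X,
/// because the true arm only clears a bit that the condition already proves
/// is zero.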
3596 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
                                    const APInt *Y, bool TrueWhenUnset) {
  const APInt *C;
3600 // (X & Y) == 0 ? X & ~Y : X --> X
3601 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
  if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
      *Y == ~*C)
    return TrueWhenUnset ? FalseVal : TrueVal;
3606 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
3607 // (X & Y) != 0 ? X : X & ~Y --> X
  if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
      *Y == ~*C)
    return TrueWhenUnset ? FalseVal : TrueVal;
3612 if (Y->isPowerOf2()) {
3613 // (X & Y) == 0 ? X | Y : X --> X | Y
3614 // (X & Y) != 0 ? X | Y : X --> X
    if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
        *Y == *C)
      return TrueWhenUnset ? TrueVal : FalseVal;
3619 // (X & Y) == 0 ? X : X | Y --> X
3620 // (X & Y) != 0 ? X : X | Y --> X | Y
    if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
        *Y == *C)
      return TrueWhenUnset ? TrueVal : FalseVal;
  }

  return nullptr;
}
/// An alternative way to test if a bit is set or not uses sgt/slt instead of
/// eq/ne.
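/// For example, "icmp slt i32 %x, 0" is true exactly when the sign bit of %x
/// is set, so it can be treated as "(X & 0x80000000) != 0" for the bit-test
/// folds above.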
static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *TrueVal,
                                           Value *FalseVal,
                                           bool TrueWhenUnset) {
  unsigned BitWidth = TrueVal->getType()->getScalarSizeInBits();
  if (!BitWidth)
    return nullptr;

  APInt MinSignedValue;
  Value *X;
  if (match(CmpLHS, m_Trunc(m_Value(X))) && (X == TrueVal || X == FalseVal)) {
3641 // icmp slt (trunc X), 0 <--> icmp ne (and X, C), 0
3642 // icmp sgt (trunc X), -1 <--> icmp eq (and X, C), 0
3643 unsigned DestSize = CmpLHS->getType()->getScalarSizeInBits();
    MinSignedValue = APInt::getSignedMinValue(DestSize).zext(BitWidth);
  } else {
    // icmp slt X, 0  <--> icmp ne (and X, C), 0
    // icmp sgt X, -1 <--> icmp eq (and X, C), 0
    X = CmpLHS;
    MinSignedValue = APInt::getSignedMinValue(BitWidth);
  }

  if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, &MinSignedValue,
                                       TrueWhenUnset))
    return V;

  return nullptr;
}
3659 /// Try to simplify a select instruction when its condition operand is an
3660 /// integer comparison.
3661 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
3662 Value *FalseVal, const Query &Q,
3663 unsigned MaxRecurse) {
3664 ICmpInst::Predicate Pred;
3665 Value *CmpLHS, *CmpRHS;
  if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
    return nullptr;
3669 // FIXME: This code is nearly duplicated in InstCombine. Using/refactoring
3670 // decomposeBitTestICmp() might help.
  if (ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero())) {
    Value *X;
    const APInt *Y;
    if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
3675 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
                                           Pred == ICmpInst::ICMP_EQ))
        return V;
  } else if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, m_Zero())) {
3679 // Comparing signed-less-than 0 checks if the sign bit is set.
    if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, TrueVal, FalseVal,
                                                false))
      return V;
  } else if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, m_AllOnes())) {
    // Comparing signed-greater-than -1 checks if the sign bit is not set.
    if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, TrueVal, FalseVal,
                                                true))
      return V;
  }
  if (CondVal->hasOneUse()) {
    const APInt *C;
    if (match(CmpRHS, m_APInt(C))) {
      // X < MIN ? T : F --> F
      if (Pred == ICmpInst::ICMP_SLT && C->isMinSignedValue())
        return FalseVal;
      // X < MIN ? T : F --> F
      if (Pred == ICmpInst::ICMP_ULT && C->isMinValue())
        return FalseVal;
      // X > MAX ? T : F --> F
      if (Pred == ICmpInst::ICMP_SGT && C->isMaxSignedValue())
        return FalseVal;
      // X > MAX ? T : F --> F
      if (Pred == ICmpInst::ICMP_UGT && C->isMaxValue())
        return FalseVal;
    }
  }
3708 // If we have an equality comparison, then we know the value in one of the
3709 // arms of the select. See if substituting this value into the arm and
3710 // simplifying the result yields the same value as the other arm.
  if (Pred == ICmpInst::ICMP_EQ) {
    if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
            TrueVal ||
        SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
            TrueVal)
      return FalseVal;
    if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
            FalseVal ||
        SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
            FalseVal)
      return FalseVal;
  } else if (Pred == ICmpInst::ICMP_NE) {
    if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
            FalseVal ||
        SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
            FalseVal)
      return TrueVal;
    if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
            TrueVal ||
        SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
            TrueVal)
      return TrueVal;
  }

  return nullptr;
}
3738 /// Given operands for a SelectInst, see if we can fold the result.
3739 /// If not, this returns null.
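/// For example, "select i1 true, i32 %a, i32 %b" folds to %a, and
/// "select i1 %c, i32 %x, i32 %x" folds to %x.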
3740 static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
3741 Value *FalseVal, const Query &Q,
3742 unsigned MaxRecurse) {
3743 // select true, X, Y -> X
3744 // select false, X, Y -> Y
  if (Constant *CB = dyn_cast<Constant>(CondVal)) {
    if (CB->isAllOnesValue())
      return TrueVal;
    if (CB->isNullValue())
      return FalseVal;
  }

  // select C, X, X -> X
  if (TrueVal == FalseVal)
    return TrueVal;

  if (isa<UndefValue>(CondVal)) {  // select undef, X, Y -> X or Y
    if (isa<Constant>(TrueVal))
      return TrueVal;
    return FalseVal;
  }
  if (isa<UndefValue>(TrueVal))    // select C, undef, X -> X
    return FalseVal;
  if (isa<UndefValue>(FalseVal))   // select C, X, undef -> X
    return TrueVal;

  if (Value *V =
          simplifySelectWithICmpCond(CondVal, TrueVal, FalseVal, Q, MaxRecurse))
    return V;

  return nullptr;
}
3773 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
3774 const DataLayout &DL,
3775 const TargetLibraryInfo *TLI,
3776 const DominatorTree *DT, AssumptionCache *AC,
3777 const Instruction *CxtI) {
3778 return ::SimplifySelectInst(Cond, TrueVal, FalseVal,
                              Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
/// Given operands for a GetElementPtrInst, see if we can fold the result.
3783 /// If not, this returns null.
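/// For example, "getelementptr i32, i32* %p, i64 0" folds to %p, and a GEP
/// whose operands are all constants folds to a constant expression.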
3784 static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
3785 const Query &Q, unsigned) {
  // The address space of the GEP pointer operand.
  unsigned AS =
      cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
3790 // getelementptr P -> P.
  if (Ops.size() == 1)
    return Ops[0];
3794 // Compute the (pointer) type returned by the GEP instruction.
3795 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
3796 Type *GEPTy = PointerType::get(LastType, AS);
3797 if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
3798 GEPTy = VectorType::get(GEPTy, VT->getNumElements());
3799 else if (VectorType *VT = dyn_cast<VectorType>(Ops[1]->getType()))
3800 GEPTy = VectorType::get(GEPTy, VT->getNumElements());
3802 if (isa<UndefValue>(Ops[0]))
3803 return UndefValue::get(GEPTy);
3805 if (Ops.size() == 2) {
3806 // getelementptr P, 0 -> P.
    if (match(Ops[1], m_Zero()))
      return Ops[0];

    Type *Ty = SrcTy;
    if (Ty->isSized()) {
      Value *P;
      uint64_t C;
      uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
3815 // getelementptr P, N -> P if P points to a type of zero size.
      if (TyAllocSize == 0)
        return Ops[0];
3819 // The following transforms are only safe if the ptrtoint cast
3820 // doesn't truncate the pointers.
3821 if (Ops[1]->getType()->getScalarSizeInBits() ==
3822 Q.DL.getPointerSizeInBits(AS)) {
3823 auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
3824 if (match(P, m_Zero()))
            return Constant::getNullValue(GEPTy);
          Value *Temp;
          if (match(P, m_PtrToInt(m_Value(Temp))))
            if (Temp->getType() == GEPTy)
              return Temp;
          return nullptr;
        };
3833 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
3834 if (TyAllocSize == 1 &&
3835 match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0])))))
          if (Value *R = PtrToIntOrZero(P))
            return R;
3839 // getelementptr V, (ashr (sub P, V), C) -> Q
3840 // if P points to a type of size 1 << C.
3842 m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
3843 m_ConstantInt(C))) &&
3844 TyAllocSize == 1ULL << C)
          if (Value *R = PtrToIntOrZero(P))
            return R;
3848 // getelementptr V, (sdiv (sub P, V), C) -> Q
3849 // if P points to a type of size C.
3851 m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
3852 m_SpecificInt(TyAllocSize))))
          if (Value *R = PtrToIntOrZero(P))
            return R;
      }
    }
  }
3859 if (Q.DL.getTypeAllocSize(LastType) == 1 &&
3860 all_of(Ops.slice(1).drop_back(1),
3861 [](Value *Idx) { return match(Idx, m_Zero()); })) {
    unsigned PtrWidth =
        Q.DL.getPointerSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
3864 if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == PtrWidth) {
3865 APInt BasePtrOffset(PtrWidth, 0);
3866 Value *StrippedBasePtr =
          Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
                                                            BasePtrOffset);
3870 // gep (gep V, C), (sub 0, V) -> C
3871 if (match(Ops.back(),
3872 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr))))) {
3873 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
        return ConstantExpr::getIntToPtr(CI, GEPTy);
      }
3876 // gep (gep V, C), (xor V, -1) -> C-1
3877 if (match(Ops.back(),
3878 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes()))) {
3879 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
        return ConstantExpr::getIntToPtr(CI, GEPTy);
      }
    }
  }
3885 // Check to see if this is constant foldable.
3886 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (!isa<Constant>(Ops[i]))
      return nullptr;

  return ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
                                        Ops.slice(1));
}
3894 Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
3895 const DataLayout &DL,
3896 const TargetLibraryInfo *TLI,
3897 const DominatorTree *DT, AssumptionCache *AC,
3898 const Instruction *CxtI) {
3899 return ::SimplifyGEPInst(SrcTy, Ops,
                           Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
3903 /// Given operands for an InsertValueInst, see if we can fold the result.
3904 /// If not, this returns null.
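/// For example, "insertvalue {i32, i32} %agg, i32 undef, 0" folds to %agg.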
3905 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
3906 ArrayRef<unsigned> Idxs, const Query &Q,
3908 if (Constant *CAgg = dyn_cast<Constant>(Agg))
3909 if (Constant *CVal = dyn_cast<Constant>(Val))
3910 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
3912 // insertvalue x, undef, n -> x
  if (match(Val, m_Undef()))
    return Agg;
3916 // insertvalue x, (extractvalue y, n), n
3917 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
3918 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
3919 EV->getIndices() == Idxs) {
3920 // insertvalue undef, (extractvalue y, n), n -> y
3921 if (match(Agg, m_Undef()))
3922 return EV->getAggregateOperand();
3924 // insertvalue y, (extractvalue y, n), n -> y
      if (Agg == EV->getAggregateOperand())
        return Agg;
    }

  return nullptr;
}
3932 Value *llvm::SimplifyInsertValueInst(
3933 Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, const DataLayout &DL,
3934 const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC,
3935 const Instruction *CxtI) {
  return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query(DL, TLI, DT, AC, CxtI),
                                   RecursionLimit);
}
3940 /// Given operands for an ExtractValueInst, see if we can fold the result.
3941 /// If not, this returns null.
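/// For example, extracting index 0 from "%agg = insertvalue {i32, i32} undef,
/// i32 %v, 0" folds directly to %v.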
3942 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
3943 const Query &, unsigned) {
3944 if (auto *CAgg = dyn_cast<Constant>(Agg))
3945 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
3947 // extractvalue x, (insertvalue y, elt, n), n -> elt
3948 unsigned NumIdxs = Idxs.size();
3949 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
3950 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
3951 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
3952 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
3953 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
3954 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
3955 Idxs.slice(0, NumCommonIdxs)) {
3956 if (NumIdxs == NumInsertValueIdxs)
        return IVI->getInsertedValueOperand();
      break;
    }
  }

  return nullptr;
}
3965 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
3966 const DataLayout &DL,
3967 const TargetLibraryInfo *TLI,
3968 const DominatorTree *DT,
3969 AssumptionCache *AC,
3970 const Instruction *CxtI) {
  return ::SimplifyExtractValueInst(Agg, Idxs, Query(DL, TLI, DT, AC, CxtI),
                                    RecursionLimit);
}
3975 /// Given operands for an ExtractElementInst, see if we can fold the result.
3976 /// If not, this returns null.
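/// For example, "extractelement <4 x i32> zeroinitializer, i32 %i" folds to 0
/// regardless of the index, because the vector is a splat.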
3977 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const Query &,
3979 if (auto *CVec = dyn_cast<Constant>(Vec)) {
3980 if (auto *CIdx = dyn_cast<Constant>(Idx))
3981 return ConstantFoldExtractElementInstruction(CVec, CIdx);
3983 // The index is not relevant if our vector is a splat.
    if (auto *Splat = CVec->getSplatValue())
      return Splat;
  }
3987 if (isa<UndefValue>(Vec))
3988 return UndefValue::get(Vec->getType()->getVectorElementType());
3991 // If extracting a specified index from the vector, see if we can recursively
3992 // find a previously computed scalar that was inserted into the vector.
3993 if (auto *IdxC = dyn_cast<ConstantInt>(Idx))
    if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
      return Elt;

  return nullptr;
}
4000 Value *llvm::SimplifyExtractElementInst(
4001 Value *Vec, Value *Idx, const DataLayout &DL, const TargetLibraryInfo *TLI,
4002 const DominatorTree *DT, AssumptionCache *AC, const Instruction *CxtI) {
  return ::SimplifyExtractElementInst(Vec, Idx, Query(DL, TLI, DT, AC, CxtI),
                                      RecursionLimit);
}
4007 /// See if we can fold the given phi. If not, returns null.
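/// For example, "phi i32 [ %x, %bb0 ], [ %x, %bb1 ], [ undef, %bb2 ]" folds to
/// %x, provided %x dominates the phi's block.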
4008 static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
4009 // If all of the PHI's incoming values are the same then replace the PHI node
4010 // with the common value.
4011 Value *CommonValue = nullptr;
4012 bool HasUndefInput = false;
4013 for (Value *Incoming : PN->incoming_values()) {
4014 // If the incoming value is the phi node itself, it can safely be skipped.
4015 if (Incoming == PN) continue;
4016 if (isa<UndefValue>(Incoming)) {
4017 // Remember that we saw an undef value, but otherwise ignore them.
      HasUndefInput = true;
      continue;
    }
4021 if (CommonValue && Incoming != CommonValue)
4022 return nullptr; // Not the same, bail out.
    CommonValue = Incoming;
  }
4026 // If CommonValue is null then all of the incoming values were either undef or
  // equal to the phi node itself.
  if (!CommonValue)
    return UndefValue::get(PN->getType());
4031 // If we have a PHI node like phi(X, undef, X), where X is defined by some
4032 // instruction, we cannot return X as the result of the PHI node unless it
4033 // dominates the PHI block.
  return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
}
4040 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op,
4041 Type *Ty, const Query &Q, unsigned MaxRecurse) {
4042 if (auto *C = dyn_cast<Constant>(Op))
4043 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
4045 if (auto *CI = dyn_cast<CastInst>(Op)) {
4046 auto *Src = CI->getOperand(0);
4047 Type *SrcTy = Src->getType();
    Type *MidTy = CI->getType();
    Type *DstTy = Ty;
    if (Src->getType() == Ty) {
4051 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
4052 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
      Type *SrcIntPtrTy =
          SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
      Type *MidIntPtrTy =
          MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
      Type *DstIntPtrTy =
          DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
4059 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
4060 SrcIntPtrTy, MidIntPtrTy,
                                         DstIntPtrTy) == Instruction::BitCast)
        return Src;
    }
  }
4067 if (CastOpc == Instruction::BitCast)
    if (Op->getType() == Ty)
      return Op;

  return nullptr;
}
4074 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
4075 const DataLayout &DL,
4076 const TargetLibraryInfo *TLI,
4077 const DominatorTree *DT, AssumptionCache *AC,
4078 const Instruction *CxtI) {
  return ::SimplifyCastInst(CastOpc, Op, Ty, Query(DL, TLI, DT, AC, CxtI),
                            RecursionLimit);
}
4083 /// For the given destination element of a shuffle, peek through shuffles to
4084 /// match a root vector source operand that contains that element in the same
/// vector lane (i.e., the same mask index), so we can eliminate the shuffle(s).
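/// For example, a shuffle (or a chain of shuffles) whose mask ultimately
/// selects every destination lane from the same lane of one source vector can
/// be replaced by that source vector.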
4086 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
4087 Constant *Mask, Value *RootVec, int RootElt,
                                   unsigned MaxRecurse) {
  if (!MaxRecurse--)
    return nullptr;

  // Bail out if any mask value is undefined. That kind of shuffle may be
  // simplified further based on demanded bits or other folds.
  int MaskVal = ShuffleVectorInst::getMaskValue(Mask, RootElt);
  if (MaskVal == -1)
    return nullptr;
4098 // The mask value chooses which source operand we need to look at next.
  Value *SourceOp;
  int InVecNumElts = Op0->getType()->getVectorNumElements();
  if (MaskVal < InVecNumElts) {
    SourceOp = Op0;
  } else {
    SourceOp = Op1;
    RootElt = MaskVal - InVecNumElts;
  }
4109 // If the source operand is a shuffle itself, look through it to find the
4110 // matching root vector.
4111 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
4112 return foldIdentityShuffles(
4113 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
4114 SourceShuf->getMask(), RootVec, RootElt, MaxRecurse);
  // TODO: Look through bitcasts? What if the bitcast changes the vector element
  // size?
4120 // The source operand is not a shuffle. Initialize the root vector value for
  // this shuffle if that has not been done yet.
  if (!RootVec)
    RootVec = SourceOp;
4125 // Give up as soon as a source operand does not match the existing root value.
  if (RootVec != SourceOp)
    return nullptr;
4129 // The element must be coming from the same lane in the source vector
4130 // (although it may have crossed lanes in intermediate shuffles).
  if (RootElt != DestElt)
    return nullptr;

  return RootVec;
}
4137 static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
4138 Type *RetTy, const Query &Q,
4139 unsigned MaxRecurse) {
4140 Type *InVecTy = Op0->getType();
4141 unsigned MaskNumElts = Mask->getType()->getVectorNumElements();
4142 unsigned InVecNumElts = InVecTy->getVectorNumElements();
4144 auto *Op0Const = dyn_cast<Constant>(Op0);
4145 auto *Op1Const = dyn_cast<Constant>(Op1);
4147 // If all operands are constant, constant fold the shuffle.
4148 if (Op0Const && Op1Const)
4149 return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
4151 // If only one of the operands is constant, constant fold the shuffle if the
4152 // mask does not select elements from the variable operand.
4153 bool MaskSelects0 = false, MaskSelects1 = false;
4154 for (unsigned i = 0; i != MaskNumElts; ++i) {
    int Idx = ShuffleVectorInst::getMaskValue(Mask, i);
    if (Idx == -1)
      continue;
    if ((unsigned)Idx < InVecNumElts)
      MaskSelects0 = true;
    else
      MaskSelects1 = true;
  }
4163 if (!MaskSelects0 && Op1Const)
    return ConstantFoldShuffleVectorInstruction(UndefValue::get(InVecTy),
                                                Op1Const, Mask);
4166 if (!MaskSelects1 && Op0Const)
4167 return ConstantFoldShuffleVectorInstruction(Op0Const,
4168 UndefValue::get(InVecTy), Mask);
4170 // A shuffle of a splat is always the splat itself. Legal if the shuffle's
  // value type is the same as the input vectors' type.
4172 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
4173 if (!MaskSelects1 && RetTy == InVecTy &&
        OpShuf->getMask()->getSplatValue())
      return Op0;
4176 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op1))
4177 if (!MaskSelects0 && RetTy == InVecTy &&
        OpShuf->getMask()->getSplatValue())
      return Op1;
4181 // Don't fold a shuffle with undef mask elements. This may get folded in a
4182 // better way using demanded bits or other analysis.
4183 // TODO: Should we allow this?
4184 for (unsigned i = 0; i != MaskNumElts; ++i)
    if (ShuffleVectorInst::getMaskValue(Mask, i) == -1)
      return nullptr;
4188 // Check if every element of this shuffle can be mapped back to the
4189 // corresponding element of a single root vector. If so, we don't need this
4190 // shuffle. This handles simple identity shuffles as well as chains of
4191 // shuffles that may widen/narrow and/or move elements across lanes and back.
4192 Value *RootVec = nullptr;
4193 for (unsigned i = 0; i != MaskNumElts; ++i) {
4194 // Note that recursion is limited for each vector element, so if any element
4195 // exceeds the limit, this will fail to simplify.
4196 RootVec = foldIdentityShuffles(i, Op0, Op1, Mask, RootVec, i, MaxRecurse);
4198 // We can't replace a widening/narrowing shuffle with one of its operands.
    if (!RootVec || RootVec->getType() != RetTy)
      return nullptr;
  }

  return RootVec;
}
4205 /// Given operands for a ShuffleVectorInst, fold the result or return null.
4206 Value *llvm::SimplifyShuffleVectorInst(
4207 Value *Op0, Value *Op1, Constant *Mask, Type *RetTy,
4208 const DataLayout &DL, const TargetLibraryInfo *TLI, const DominatorTree *DT,
4209 AssumptionCache *AC, const Instruction *CxtI) {
4210 return ::SimplifyShuffleVectorInst(
      Op0, Op1, Mask, RetTy, Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
4214 //=== Helper functions for higher up the class hierarchy.
4216 /// Given operands for a BinaryOperator, see if we can fold the result.
4217 /// If not, this returns null.
4218 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4219 const Query &Q, unsigned MaxRecurse) {
  switch (Opcode) {
  case Instruction::Add:
4222 return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse);
4223 case Instruction::FAdd:
4224 return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4225 case Instruction::Sub:
4226 return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse);
4227 case Instruction::FSub:
4228 return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4229 case Instruction::Mul:
4230 return SimplifyMulInst(LHS, RHS, Q, MaxRecurse);
4231 case Instruction::FMul:
4232 return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4233 case Instruction::SDiv:
4234 return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
4235 case Instruction::UDiv:
4236 return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
4237 case Instruction::FDiv:
4238 return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4239 case Instruction::SRem:
4240 return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
4241 case Instruction::URem:
4242 return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
4243 case Instruction::FRem:
4244 return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4245 case Instruction::Shl:
4246 return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse);
4247 case Instruction::LShr:
4248 return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse);
4249 case Instruction::AShr:
4250 return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse);
4251 case Instruction::And:
4252 return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
4253 case Instruction::Or:
4254 return SimplifyOrInst(LHS, RHS, Q, MaxRecurse);
4255 case Instruction::Xor:
4256 return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
  default:
    llvm_unreachable("Unexpected opcode");
  }
}
4262 /// Given operands for a BinaryOperator, see if we can fold the result.
4263 /// If not, this returns null.
/// Unlike SimplifyBinOp, this uses FastMathFlags when folding the result.
/// If FastMathFlags are not needed, it simply falls back to SimplifyBinOp.
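/// For example, with the 'nnan' flag "fsub nnan double %x, %x" can fold to
/// +0.0, a fold that is not valid without FastMathFlags.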
4266 static Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4267 const FastMathFlags &FMF, const Query &Q,
4268 unsigned MaxRecurse) {
  switch (Opcode) {
  case Instruction::FAdd:
4271 return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
4272 case Instruction::FSub:
4273 return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
4274 case Instruction::FMul:
4275 return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
4276 case Instruction::FDiv:
4277 return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
  default:
    return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
  }
}
4283 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4284 const DataLayout &DL, const TargetLibraryInfo *TLI,
4285 const DominatorTree *DT, AssumptionCache *AC,
4286 const Instruction *CxtI) {
  return ::SimplifyBinOp(Opcode, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
                         RecursionLimit);
}
4291 Value *llvm::SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4292 const FastMathFlags &FMF, const DataLayout &DL,
4293 const TargetLibraryInfo *TLI,
4294 const DominatorTree *DT, AssumptionCache *AC,
4295 const Instruction *CxtI) {
  return ::SimplifyFPBinOp(Opcode, LHS, RHS, FMF, Query(DL, TLI, DT, AC, CxtI),
                           RecursionLimit);
}
4300 /// Given operands for a CmpInst, see if we can fold the result.
4301 static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4302 const Query &Q, unsigned MaxRecurse) {
4303 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
4304 return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
  return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
}
4308 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4309 const DataLayout &DL, const TargetLibraryInfo *TLI,
4310 const DominatorTree *DT, AssumptionCache *AC,
4311 const Instruction *CxtI) {
  return ::SimplifyCmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
                           RecursionLimit);
}
4316 static bool IsIdempotent(Intrinsic::ID ID) {
  switch (ID) {
  default: return false;
4320 // Unary idempotent: f(f(x)) = f(x)
4321 case Intrinsic::fabs:
4322 case Intrinsic::floor:
4323 case Intrinsic::ceil:
4324 case Intrinsic::trunc:
4325 case Intrinsic::rint:
4326 case Intrinsic::nearbyint:
  case Intrinsic::round:
    return true;
  }
}
4332 static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset,
4333 const DataLayout &DL) {
4334 GlobalValue *PtrSym;
  APInt PtrOffset;
  if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
    return nullptr;
4339 Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext());
4340 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
4341 Type *Int32PtrTy = Int32Ty->getPointerTo();
4342 Type *Int64Ty = Type::getInt64Ty(Ptr->getContext());
4344 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
  if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64)
    return nullptr;
4348 uint64_t OffsetInt = OffsetConstInt->getSExtValue();
  if (OffsetInt % 4 != 0)
    return nullptr;
4352 Constant *C = ConstantExpr::getGetElementPtr(
4353 Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy),
4354 ConstantInt::get(Int64Ty, OffsetInt / 4));
  Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL);
  if (!Loaded)
    return nullptr;

  auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
  if (!LoadedCE)
    return nullptr;
4363 if (LoadedCE->getOpcode() == Instruction::Trunc) {
    LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
    if (!LoadedCE)
      return nullptr;
  }

  if (LoadedCE->getOpcode() != Instruction::Sub)
    return nullptr;
4372 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
  if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
    return nullptr;
4375 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
4377 Constant *LoadedRHS = LoadedCE->getOperand(1);
4378 GlobalValue *LoadedRHSSym;
4379 APInt LoadedRHSOffset;
  if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
                                  DL) ||
      PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
    return nullptr;

  return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy);
}
4388 static bool maskIsAllZeroOrUndef(Value *Mask) {
  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
       ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}
4404 template <typename IterTy>
4405 static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
4406 const Query &Q, unsigned MaxRecurse) {
4407 Intrinsic::ID IID = F->getIntrinsicID();
4408 unsigned NumOperands = std::distance(ArgBegin, ArgEnd);
4411 if (NumOperands == 1) {
4412 // Perform idempotent optimizations
4413 if (IsIdempotent(IID)) {
4414 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin)) {
        if (II->getIntrinsicID() == IID)
          return II;
      }
    }

    switch (IID) {
    case Intrinsic::fabs: {
      if (SignBitMustBeZero(*ArgBegin, Q.TLI))
        return *ArgBegin;
      break;
    }
    default:
      break;
    }
  }

  // Binary Ops
4432 if (NumOperands == 2) {
4433 Value *LHS = *ArgBegin;
4434 Value *RHS = *(ArgBegin + 1);
4435 Type *ReturnType = F->getReturnType();
    switch (IID) {
    case Intrinsic::usub_with_overflow:
4439 case Intrinsic::ssub_with_overflow: {
4440 // X - X -> { 0, false }
      if (LHS == RHS)
        return Constant::getNullValue(ReturnType);
4444 // X - undef -> undef
4445 // undef - X -> undef
4446 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
        return UndefValue::get(ReturnType);

      return nullptr;
    }
4451 case Intrinsic::uadd_with_overflow:
4452 case Intrinsic::sadd_with_overflow: {
4453 // X + undef -> undef
4454 if (isa<UndefValue>(RHS))
        return UndefValue::get(ReturnType);

      return nullptr;
    }
4459 case Intrinsic::umul_with_overflow:
4460 case Intrinsic::smul_with_overflow: {
4461 // X * 0 -> { 0, false }
4462 if (match(RHS, m_Zero()))
4463 return Constant::getNullValue(ReturnType);
4465 // X * undef -> { 0, false }
4466 if (match(RHS, m_Undef()))
        return Constant::getNullValue(ReturnType);

      return nullptr;
    }
4471 case Intrinsic::load_relative: {
4472 Constant *C0 = dyn_cast<Constant>(LHS);
4473 Constant *C1 = dyn_cast<Constant>(RHS);
      if (C0 && C1)
        return SimplifyRelativeLoad(C0, C1, Q.DL);
      return nullptr;
    }
    default:
      break;
    }
  }
4483 // Simplify calls to llvm.masked.load.*
  switch (IID) {
  case Intrinsic::masked_load: {
4486 Value *MaskArg = ArgBegin[2];
4487 Value *PassthruArg = ArgBegin[3];
4488 // If the mask is all zeros or undef, the "passthru" argument is the result.
    if (maskIsAllZeroOrUndef(MaskArg))
      return PassthruArg;
    break;
  }
  default:
    break;
  }

  return nullptr;
}
4498 template <typename IterTy>
4499 static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
4500 const Query &Q, unsigned MaxRecurse) {
4501 Type *Ty = V->getType();
4502 if (PointerType *PTy = dyn_cast<PointerType>(Ty))
4503 Ty = PTy->getElementType();
4504 FunctionType *FTy = cast<FunctionType>(Ty);
4506 // call undef -> undef
4507 // call null -> undef
4508 if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4509 return UndefValue::get(FTy->getReturnType());
  Function *F = dyn_cast<Function>(V);
  if (!F)
    return nullptr;
4515 if (F->isIntrinsic())
    if (Value *Ret = SimplifyIntrinsic(F, ArgBegin, ArgEnd, Q, MaxRecurse))
      return Ret;

  if (!canConstantFoldCallTo(F))
    return nullptr;
4522 SmallVector<Constant *, 4> ConstantArgs;
4523 ConstantArgs.reserve(ArgEnd - ArgBegin);
4524 for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) {
    Constant *C = dyn_cast<Constant>(*I);
    if (!C)
      return nullptr;
    ConstantArgs.push_back(C);
  }

  return ConstantFoldCall(F, ConstantArgs, Q.TLI);
}
4534 Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
4535 User::op_iterator ArgEnd, const DataLayout &DL,
4536 const TargetLibraryInfo *TLI, const DominatorTree *DT,
4537 AssumptionCache *AC, const Instruction *CxtI) {
  return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(DL, TLI, DT, AC, CxtI),
                        RecursionLimit);
}
4542 Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
4543 const DataLayout &DL, const TargetLibraryInfo *TLI,
4544 const DominatorTree *DT, AssumptionCache *AC,
4545 const Instruction *CxtI) {
4546 return ::SimplifyCall(V, Args.begin(), Args.end(),
                        Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
4550 /// See if we can compute a simplified version of this instruction.
4551 /// If not, this returns null.
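/// A typical caller (illustrative) replaces the instruction when a simpler
/// value is found:
///   if (Value *V = SimplifyInstruction(I, DL, &TLI, &DT, &AC))
///     I->replaceAllUsesWith(V);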
4552 Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout &DL,
4553 const TargetLibraryInfo *TLI,
4554 const DominatorTree *DT, AssumptionCache *AC,
                                 OptimizationRemarkEmitter *ORE) {
  Value *Result;

  switch (I->getOpcode()) {
  default:
    Result = ConstantFoldInstruction(I, DL, TLI);
    break;
4562 case Instruction::FAdd:
4563 Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
4564 I->getFastMathFlags(), DL, TLI, DT, AC, I);
4566 case Instruction::Add:
4567 Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
4568 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4569 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL,
4572 case Instruction::FSub:
4573 Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
4574 I->getFastMathFlags(), DL, TLI, DT, AC, I);
4576 case Instruction::Sub:
4577 Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
4578 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4579 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL,
4582 case Instruction::FMul:
4583 Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
4584 I->getFastMathFlags(), DL, TLI, DT, AC, I);
4586 case Instruction::Mul:
4588 SimplifyMulInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
4590 case Instruction::SDiv:
4591 Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
4594 case Instruction::UDiv:
4595 Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
4598 case Instruction::FDiv:
4599 Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1),
4600 I->getFastMathFlags(), DL, TLI, DT, AC, I);
4602 case Instruction::SRem:
4603 Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
4606 case Instruction::URem:
4607 Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
4610 case Instruction::FRem:
4611 Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1),
4612 I->getFastMathFlags(), DL, TLI, DT, AC, I);
4614 case Instruction::Shl:
4615 Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
4616 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4617 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL,
4620 case Instruction::LShr:
4621 Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
4622 cast<BinaryOperator>(I)->isExact(), DL, TLI, DT,
4625 case Instruction::AShr:
4626 Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
4627 cast<BinaryOperator>(I)->isExact(), DL, TLI, DT,
4630 case Instruction::And:
4632 SimplifyAndInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
4634 case Instruction::Or:
4636 SimplifyOrInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
4638 case Instruction::Xor:
4640 SimplifyXorInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
4642 case Instruction::ICmp:
4644 SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), I->getOperand(0),
4645 I->getOperand(1), DL, TLI, DT, AC, I);
4647 case Instruction::FCmp:
4648 Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
4649 I->getOperand(0), I->getOperand(1),
4650 I->getFastMathFlags(), DL, TLI, DT, AC, I);
4652 case Instruction::Select:
4653 Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
4654 I->getOperand(2), DL, TLI, DT, AC, I);
4656 case Instruction::GetElementPtr: {
4657 SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
4658 Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
4659 Ops, DL, TLI, DT, AC, I);
4662 case Instruction::InsertValue: {
4663 InsertValueInst *IV = cast<InsertValueInst>(I);
4664 Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
4665 IV->getInsertedValueOperand(),
4666 IV->getIndices(), DL, TLI, DT, AC, I);
4669 case Instruction::ExtractValue: {
4670 auto *EVI = cast<ExtractValueInst>(I);
4671 Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
4672 EVI->getIndices(), DL, TLI, DT, AC, I);
4675 case Instruction::ExtractElement: {
4676 auto *EEI = cast<ExtractElementInst>(I);
4677 Result = SimplifyExtractElementInst(
4678 EEI->getVectorOperand(), EEI->getIndexOperand(), DL, TLI, DT, AC, I);
4681 case Instruction::ShuffleVector: {
4682 auto *SVI = cast<ShuffleVectorInst>(I);
4683 Result = SimplifyShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
                                       SVI->getMask(), SVI->getType(), DL, TLI,
                                       DT, AC, I);
    break;
  }
4688 case Instruction::PHI:
4689 Result = SimplifyPHINode(cast<PHINode>(I), Query(DL, TLI, DT, AC, I));
4691 case Instruction::Call: {
4692 CallSite CS(cast<CallInst>(I));
    Result = SimplifyCall(CS.getCalledValue(), CS.arg_begin(), CS.arg_end(), DL,
                          TLI, DT, AC, I);
    break;
  }
4697 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
4698 #include "llvm/IR/Instruction.def"
4699 #undef HANDLE_CAST_INST
4700 Result = SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(),
4701 DL, TLI, DT, AC, I);
4703 case Instruction::Alloca:
    // No simplifications for Alloca and it can't be constant folded.
    Result = nullptr;
    break;
  }
4709 // In general, it is possible for computeKnownBits to determine all bits in a
4710 // value even when the operands are not all constants.
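  // For example, in "%y = or i8 %x, -1" every bit of %y is known to be one, so
  // %y folds to i8 -1 even though %x is not a constant.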
4711 if (!Result && I->getType()->isIntOrIntVectorTy()) {
4712 unsigned BitWidth = I->getType()->getScalarSizeInBits();
4713 APInt KnownZero(BitWidth, 0);
4714 APInt KnownOne(BitWidth, 0);
4715 computeKnownBits(I, KnownZero, KnownOne, DL, /*Depth*/0, AC, I, DT, ORE);
    if ((KnownZero | KnownOne).isAllOnesValue())
      Result = ConstantInt::get(I->getType(), KnownOne);
  }
4720 /// If called on unreachable code, the above logic may report that the
4721 /// instruction simplified to itself. Make life easier for users by
4722 /// detecting that case here, returning a safe value instead.
  return Result == I ? UndefValue::get(I->getType()) : Result;
}
4726 /// \brief Implementation of recursive simplification through an instruction's
4729 /// This is the common implementation of the recursive simplification routines.
4730 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
4731 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
4732 /// instructions to process and attempt to simplify it using
4733 /// InstructionSimplify.
4735 /// This routine returns 'true' only when *it* simplifies something. The passed
4736 /// in simplified value does not count toward this.
4737 static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
4738 const TargetLibraryInfo *TLI,
4739 const DominatorTree *DT,
4740 AssumptionCache *AC) {
4741 bool Simplified = false;
4742 SmallSetVector<Instruction *, 8> Worklist;
4743 const DataLayout &DL = I->getModule()->getDataLayout();
4745 // If we have an explicit value to collapse to, do that round of the
4746 // simplification loop by hand initially.
  if (SimpleV) {
    for (User *U : I->users())
      if (U != I)
        Worklist.insert(cast<Instruction>(U));
4752 // Replace the instruction with its simplified value.
4753 I->replaceAllUsesWith(SimpleV);
    // Gracefully handle edge cases where the instruction is not wired into any
    // parent block.
    if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
        !I->mayHaveSideEffects())
      I->eraseFromParent();
  } else {
    Worklist.insert(I);
  }
4764 // Note that we must test the size on each iteration, the worklist can grow.
  for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
    I = Worklist[Idx];

    // See if this instruction simplifies.
    SimpleV = SimplifyInstruction(I, DL, TLI, DT, AC);
    if (!SimpleV)
      continue;

    Simplified = true;
4775 // Stash away all the uses of the old instruction so we can check them for
4776 // recursive simplifications after a RAUW. This is cheaper than checking all
4777 // uses of To on the recursive step in most cases.
4778 for (User *U : I->users())
4779 Worklist.insert(cast<Instruction>(U));
4781 // Replace the instruction with its simplified value.
4782 I->replaceAllUsesWith(SimpleV);
    // Gracefully handle edge cases where the instruction is not wired into any
    // parent block.
    if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
        !I->mayHaveSideEffects())
      I->eraseFromParent();
  }

  return Simplified;
}
4793 bool llvm::recursivelySimplifyInstruction(Instruction *I,
4794 const TargetLibraryInfo *TLI,
4795 const DominatorTree *DT,
4796 AssumptionCache *AC) {
  return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC);
}
4800 bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
4801 const TargetLibraryInfo *TLI,
4802 const DominatorTree *DT,
4803 AssumptionCache *AC) {
4804 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
4805 assert(SimpleV && "Must provide a simplified value.");
  return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC);
}