//===- InstructionSimplify.cpp - Fold instruction operands ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: this is usually true, and assuming so simplifies the logic (if
// they have not been simplified, the results are still correct, merely
// possibly suboptimal).
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");
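
// The Query struct below packages the analyses that simplification can use
// (DataLayout, TargetLibraryInfo, DominatorTree, AssumptionCache) together
// with the instruction being queried, so the individual folds can consult
// them without threading a long argument list through every recursive call.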
namespace {
struct Query {
  const DataLayout &DL;
  const TargetLibraryInfo *TLI;
  const DominatorTree *DT;
  AssumptionCache *AC;
  const Instruction *CxtI;

  Query(const DataLayout &DL, const TargetLibraryInfo *tli,
        const DominatorTree *dt, AssumptionCache *ac = nullptr,
        const Instruction *cxti = nullptr)
      : DL(DL), TLI(tli), DT(dt), AC(ac), CxtI(cxti) {}
};
} // end anonymous namespace
static Value *SimplifyAndInst(Value *, Value *, const Query &, unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const Query &,
                            unsigned);
static Value *SimplifyFPBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                              const Query &, unsigned);
static Value *SimplifyCmpInst(unsigned, Value *, Value *, const Query &,
                              unsigned);
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                               const Query &Q, unsigned MaxRecurse);
static Value *SimplifyOrInst(Value *, Value *, const Query &, unsigned);
static Value *SimplifyXorInst(Value *, Value *, const Query &, unsigned);
static Value *SimplifyCastInst(unsigned, Value *, Type *,
                               const Query &, unsigned);
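
// The routines above are forward declared because the simplifications are
// mutually recursive (e.g. SimplifyBinOp dispatches back into the individual
// folds). Each helper takes a MaxRecurse counter, seeded from RecursionLimit,
// and bails out once the limit is hit so the recursion stays bounded.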
/// For a boolean type, or a vector of boolean type, return false, or
/// a vector with every element false, as appropriate for the type.
static Constant *getFalse(Type *Ty) {
  assert(Ty->getScalarType()->isIntegerTy(1) &&
         "Expected i1 type or a vector of i1!");
  return Constant::getNullValue(Ty);
}

/// For a boolean type, or a vector of boolean type, return true, or
/// a vector with every element true, as appropriate for the type.
static Constant *getTrue(Type *Ty) {
  assert(Ty->getScalarType()->isIntegerTy(1) &&
         "Expected i1 type or a vector of i1!");
  return Constant::getAllOnesValue(Ty);
}
/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
                          Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}
/// Does the given value dominate the specified phi node?
static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we are processing instructions (and/or basic blocks) that have not been
  // fully added to a function, the parent nodes may still be null. Simply
  // return the conservative answer in these cases.
  if (!I->getParent() || !P->getParent() || !I->getParent()->getParent())
    return false;

  // If we have a DominatorTree then do a precise test.
  if (DT) {
    if (!DT->isReachableFromEntry(P->getParent()))
      return true;
    if (!DT->isReachableFromEntry(I->getParent()))
      return false;
    return DT->dominates(I, P);
  }

  // Otherwise, if the instruction is in the entry block and is not an invoke,
  // then it obviously dominates all phi nodes.
  if (I->getParent() == &I->getParent()->getParent()->getEntryBlock() &&
      !isa<InvokeInst>(I))
    return true;

  return false;
}
138 /// Simplify "A op (B op' C)" by distributing op over op', turning it into
139 /// "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is
140 /// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS.
141 /// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
142 /// Returns the simplified value, or null if no simplification was performed.
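///
/// For example, with "op" = And and "op'" = Or, "(A | B) & C" expands to
/// "(A & C) | (B & C)"; the expanded form is only returned when the inner
/// operations (and the final "op'") themselves fold to existing values.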
143 static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
144 unsigned OpcToExpand, const Query &Q,
145 unsigned MaxRecurse) {
146 Instruction::BinaryOps OpcodeToExpand = (Instruction::BinaryOps)OpcToExpand;
147 // Recursion is always used, so bail out at once if we already hit the limit.
151 // Check whether the expression has the form "(A op' B) op C".
152 if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
153 if (Op0->getOpcode() == OpcodeToExpand) {
154 // It does! Try turning it into "(A op C) op' (B op C)".
155 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
156 // Do "A op C" and "B op C" both simplify?
157 if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse))
158 if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
159 // They do! Return "L op' R" if it simplifies or is already available.
160 // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
161 if ((L == A && R == B) || (Instruction::isCommutative(OpcodeToExpand)
162 && L == B && R == A)) {
166 // Otherwise return "L op' R" if it simplifies.
167 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
174 // Check whether the expression has the form "A op (B op' C)".
175 if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
176 if (Op1->getOpcode() == OpcodeToExpand) {
177 // It does! Try turning it into "(A op B) op' (A op C)".
178 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
179 // Do "A op B" and "A op C" both simplify?
180 if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse))
181 if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) {
182 // They do! Return "L op' R" if it simplifies or is already available.
183 // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
184 if ((L == B && R == C) || (Instruction::isCommutative(OpcodeToExpand)
185 && L == C && R == B)) {
189 // Otherwise return "L op' R" if it simplifies.
190 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
200 /// Generic simplifications for associative binary operations.
201 /// Returns the simpler value, or null if none was found.
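///
/// For example, "(A & B) & B" reassociates to "A & (B & B)"; the inner term
/// folds to B, so the whole expression simplifies to the existing "A & B".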
202 static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
203 const Query &Q, unsigned MaxRecurse) {
204 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)Opc;
205 assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
207 // Recursion is always used, so bail out at once if we already hit the limit.
211 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
212 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
214 // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
215 if (Op0 && Op0->getOpcode() == Opcode) {
216 Value *A = Op0->getOperand(0);
217 Value *B = Op0->getOperand(1);
220 // Does "B op C" simplify?
221 if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
222 // It does! Return "A op V" if it simplifies or is already available.
223 // If V equals B then "A op V" is just the LHS.
224 if (V == B) return LHS;
225 // Otherwise return "A op V" if it simplifies.
226 if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
233 // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
234 if (Op1 && Op1->getOpcode() == Opcode) {
236 Value *B = Op1->getOperand(0);
237 Value *C = Op1->getOperand(1);
239 // Does "A op B" simplify?
240 if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
241 // It does! Return "V op C" if it simplifies or is already available.
242 // If V equals B then "V op C" is just the RHS.
243 if (V == B) return RHS;
244 // Otherwise return "V op C" if it simplifies.
245 if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
252 // The remaining transforms require commutativity as well as associativity.
253 if (!Instruction::isCommutative(Opcode))
256 // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
257 if (Op0 && Op0->getOpcode() == Opcode) {
258 Value *A = Op0->getOperand(0);
259 Value *B = Op0->getOperand(1);
262 // Does "C op A" simplify?
263 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
264 // It does! Return "V op B" if it simplifies or is already available.
265 // If V equals A then "V op B" is just the LHS.
266 if (V == A) return LHS;
267 // Otherwise return "V op B" if it simplifies.
268 if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
275 // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
276 if (Op1 && Op1->getOpcode() == Opcode) {
278 Value *B = Op1->getOperand(0);
279 Value *C = Op1->getOperand(1);
281 // Does "C op A" simplify?
282 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
283 // It does! Return "B op V" if it simplifies or is already available.
284 // If V equals C then "B op V" is just the RHS.
285 if (V == C) return RHS;
286 // Otherwise return "B op V" if it simplifies.
287 if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
297 /// In the case of a binary operation with a select instruction as an operand,
298 /// try to simplify the binop by seeing whether evaluating it on both branches
299 /// of the select results in the same value. Returns the common value if so,
300 /// otherwise returns null.
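///
/// For example, "and (select %c, -1, %x), %x" evaluates to %x on both arms
/// of the select, so the 'and' folds to %x no matter what %c is.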
301 static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
302 const Query &Q, unsigned MaxRecurse) {
303 // Recursion is always used, so bail out at once if we already hit the limit.
  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && isa<UndefValue>(TV))
    return FV;
  if (FV && isa<UndefValue>(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;
342 // If one branch simplified and the other did not, and the simplified
343 // value is equal to the unsimplified one, return the simplified value.
344 // For example, select (cond, X, X & Z) & Z -> X & Z.
345 if ((FV && !TV) || (TV && !FV)) {
346 // Check that the simplified value has the form "X op Y" where "op" is the
347 // same as the original operation.
348 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
349 if (Simplified && Simplified->getOpcode() == Opcode) {
350 // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
351 // We already know that "op" is the same as for the simplified value. See
352 // if the operands match too. If so, return the simplified value.
353 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
354 Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
355 Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
356 if (Simplified->getOperand(0) == UnsimplifiedLHS &&
357 Simplified->getOperand(1) == UnsimplifiedRHS)
359 if (Simplified->isCommutative() &&
360 Simplified->getOperand(1) == UnsimplifiedLHS &&
361 Simplified->getOperand(0) == UnsimplifiedRHS)
369 /// In the case of a comparison with a select instruction, try to simplify the
370 /// comparison by seeing whether both branches of the select result in the same
371 /// value. Returns the common value if so, otherwise returns null.
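///
/// For example, "icmp ult (select %c, i32 0, i32 1), 4" is true on both arms
/// of the select, so the compare folds to true without knowing %c.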
372 static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
373 Value *RHS, const Query &Q,
374 unsigned MaxRecurse) {
375 // Recursion is always used, so bail out at once if we already hit the limit.
379 // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
385 SelectInst *SI = cast<SelectInst>(LHS);
386 Value *Cond = SI->getCondition();
387 Value *TV = SI->getTrueValue();
388 Value *FV = SI->getFalseValue();
390 // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
391 // Does "cmp TV, RHS" simplify?
392 Value *TCmp = SimplifyCmpInst(Pred, TV, RHS, Q, MaxRecurse);
394 // It not only simplified, it simplified to the select condition. Replace
396 TCmp = getTrue(Cond->getType());
398 // It didn't simplify. However if "cmp TV, RHS" is equal to the select
399 // condition then we can replace it with 'true'. Otherwise give up.
400 if (!isSameCompare(Cond, Pred, TV, RHS))
402 TCmp = getTrue(Cond->getType());
405 // Does "cmp FV, RHS" simplify?
406 Value *FCmp = SimplifyCmpInst(Pred, FV, RHS, Q, MaxRecurse);
408 // It not only simplified, it simplified to the select condition. Replace
410 FCmp = getFalse(Cond->getType());
412 // It didn't simplify. However if "cmp FV, RHS" is equal to the select
413 // condition then we can replace it with 'false'. Otherwise give up.
414 if (!isSameCompare(Cond, Pred, FV, RHS))
416 FCmp = getFalse(Cond->getType());
419 // If both sides simplified to the same value, then use it as the result of
420 // the original comparison.
424 // The remaining cases only make sense if the select condition has the same
425 // type as the result of the comparison, so bail out if this is not so.
426 if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
428 // If the false value simplified to false, then the result of the compare
429 // is equal to "Cond && TCmp". This also catches the case when the false
430 // value simplified to false and the true value to true, returning "Cond".
431 if (match(FCmp, m_Zero()))
432 if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
434 // If the true value simplified to true, then the result of the compare
435 // is equal to "Cond || FCmp".
436 if (match(TCmp, m_One()))
437 if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
439 // Finally, if the false value simplified to true and the true value to
440 // false, then the result of the compare is equal to "!Cond".
441 if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
443 SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()),
450 /// In the case of a binary operation with an operand that is a PHI instruction,
451 /// try to simplify the binop by seeing whether evaluating it on the incoming
452 /// phi values yields the same result for every value. If so returns the common
453 /// value, otherwise returns null.
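///
/// For example, if %p = phi [ %a, ... ], [ 0, ... ], then "or %p, %a"
/// evaluates to %a for every incoming value, so the 'or' folds to %a.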
454 static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
455 const Query &Q, unsigned MaxRecurse) {
456 // Recursion is always used, so bail out at once if we already hit the limit.
461 if (isa<PHINode>(LHS)) {
462 PI = cast<PHINode>(LHS);
463 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
464 if (!ValueDominatesPHI(RHS, PI, Q.DT))
467 assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
468 PI = cast<PHINode>(RHS);
469 // Bail out if LHS and the phi may be mutually interdependent due to a loop.
470 if (!ValueDominatesPHI(LHS, PI, Q.DT))
474 // Evaluate the BinOp on the incoming phi values.
475 Value *CommonValue = nullptr;
476 for (Value *Incoming : PI->incoming_values()) {
477 // If the incoming value is the phi node itself, it can safely be skipped.
478 if (Incoming == PI) continue;
479 Value *V = PI == LHS ?
480 SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
481 SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than the one obtained previously, then give up.
484 if (!V || (CommonValue && V != CommonValue))
492 /// In the case of a comparison with a PHI instruction, try to simplify the
493 /// comparison by seeing whether comparing with all of the incoming phi values
494 /// yields the same result every time. If so returns the common result,
495 /// otherwise returns null.
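///
/// For example, if %p = phi [ 1, ... ], [ 2, ... ], then "icmp ne %p, 0" is
/// true for every incoming value, so the compare folds to true.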
496 static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
497 const Query &Q, unsigned MaxRecurse) {
498 // Recursion is always used, so bail out at once if we already hit the limit.
502 // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
508 PHINode *PI = cast<PHINode>(LHS);
510 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
511 if (!ValueDominatesPHI(RHS, PI, Q.DT))
514 // Evaluate the BinOp on the incoming phi values.
515 Value *CommonValue = nullptr;
516 for (Value *Incoming : PI->incoming_values()) {
517 // If the incoming value is the phi node itself, it can safely be skipped.
518 if (Incoming == PI) continue;
519 Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than the one obtained previously, then give up.
522 if (!V || (CommonValue && V != CommonValue))
530 /// Given operands for an Add, see if we can fold the result.
531 /// If not, this returns null.
532 static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
533 const Query &Q, unsigned MaxRecurse) {
534 if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
535 if (Constant *CRHS = dyn_cast<Constant>(Op1))
536 return ConstantFoldBinaryOpOperands(Instruction::Add, CLHS, CRHS, Q.DL);
538 // Canonicalize the constant to the RHS.
542 // X + undef -> undef
543 if (match(Op1, m_Undef()))
547 if (match(Op1, m_Zero()))
554 if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
555 match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
558 // X + ~X -> -1 since ~X = -X-1
559 if (match(Op0, m_Not(m_Specific(Op1))) ||
560 match(Op1, m_Not(m_Specific(Op0))))
561 return Constant::getAllOnesValue(Op0->getType());
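
  // For i1 operands, add is the same operation as xor (1 + 1 wraps to 0), so
  // reuse the xor folds below.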
564 if (MaxRecurse && Op0->getType()->isIntegerTy(1))
565 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
568 // Try some generic simplifications for associative operations.
569 if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
573 // Threading Add over selects and phi nodes is pointless, so don't bother.
574 // Threading over the select in "A + select(cond, B, C)" means evaluating
575 // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
576 // only if B and C are equal. If B and C are equal then (since we assume
577 // that operands have already been simplified) "select(cond, B, C)" should
578 // have been simplified to the common value of B and C already. Analysing
579 // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
580 // for threading over phi nodes.
585 Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
586 const DataLayout &DL, const TargetLibraryInfo *TLI,
587 const DominatorTree *DT, AssumptionCache *AC,
588 const Instruction *CxtI) {
589 return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
593 /// \brief Compute the base pointer and cumulative constant offsets for V.
595 /// This strips all constant offsets off of V, leaving it the base pointer, and
596 /// accumulates the total constant offset applied in the returned constant. It
597 /// returns 0 if V is not a pointer, and returns the constant '0' if there are
598 /// no constant offsets applied.
600 /// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
601 /// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
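///
/// For example, if V is "getelementptr inbounds i8, i8* %base, i64 4", V is
/// rewritten to %base and the returned constant is 4 (at pointer width).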
603 static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
604 bool AllowNonInbounds = false) {
605 assert(V->getType()->getScalarType()->isPointerTy());
607 Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
608 APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
610 // Even though we don't look through PHI nodes, we could be called on an
611 // instruction in an unreachable block, which may be on a cycle.
612 SmallPtrSet<Value *, 4> Visited;
615 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
616 if ((!AllowNonInbounds && !GEP->isInBounds()) ||
617 !GEP->accumulateConstantOffset(DL, Offset))
619 V = GEP->getPointerOperand();
620 } else if (Operator::getOpcode(V) == Instruction::BitCast) {
621 V = cast<Operator>(V)->getOperand(0);
622 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
623 if (GA->isInterposable())
625 V = GA->getAliasee();
627 if (auto CS = CallSite(V))
628 if (Value *RV = CS.getReturnedArgOperand()) {
634 assert(V->getType()->getScalarType()->isPointerTy() &&
635 "Unexpected operand type!");
636 } while (Visited.insert(V).second);
638 Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
639 if (V->getType()->isVectorTy())
640 return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
645 /// \brief Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
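///
/// For example, "gep inbounds i8, i8* %p, i64 8" and "gep inbounds i8, i8*
/// %p, i64 3" strip to the common base %p, so their difference folds to 5.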
647 static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
649 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
650 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
652 // If LHS and RHS are not related via constant offsets to the same base
653 // value, there is nothing we can do here.
657 // Otherwise, the difference of LHS - RHS can be computed as:
659 // = (LHSOffset + Base) - (RHSOffset + Base)
660 // = LHSOffset - RHSOffset
661 return ConstantExpr::getSub(LHSOffset, RHSOffset);
664 /// Given operands for a Sub, see if we can fold the result.
665 /// If not, this returns null.
666 static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
667 const Query &Q, unsigned MaxRecurse) {
668 if (Constant *CLHS = dyn_cast<Constant>(Op0))
669 if (Constant *CRHS = dyn_cast<Constant>(Op1))
670 return ConstantFoldBinaryOpOperands(Instruction::Sub, CLHS, CRHS, Q.DL);
672 // X - undef -> undef
673 // undef - X -> undef
674 if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
675 return UndefValue::get(Op0->getType());
678 if (match(Op1, m_Zero()))
683 return Constant::getNullValue(Op0->getType());
685 // Is this a negation?
686 if (match(Op0, m_Zero())) {
687 // 0 - X -> 0 if the sub is NUW.
691 unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
692 APInt KnownZero(BitWidth, 0);
693 APInt KnownOne(BitWidth, 0);
694 computeKnownBits(Op1, KnownZero, KnownOne, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
695 if (KnownZero == ~APInt::getSignBit(BitWidth)) {
696 // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
697 // Op1 must be 0 because negating the minimum signed value is undefined.
701 // 0 - X -> X if X is 0 or the minimum signed value.
706 // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
707 // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
708 Value *X = nullptr, *Y = nullptr, *Z = Op1;
709 if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
710 // See if "V === Y - Z" simplifies.
711 if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
712 // It does! Now see if "X + V" simplifies.
713 if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
714 // It does, we successfully reassociated!
718 // See if "V === X - Z" simplifies.
719 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
720 // It does! Now see if "Y + V" simplifies.
721 if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
722 // It does, we successfully reassociated!
728 // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
729 // For example, X - (X + 1) -> -1
731 if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
732 // See if "V === X - Y" simplifies.
733 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
734 // It does! Now see if "V - Z" simplifies.
735 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
736 // It does, we successfully reassociated!
740 // See if "V === X - Z" simplifies.
741 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
742 // It does! Now see if "V - Y" simplifies.
743 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
744 // It does, we successfully reassociated!
750 // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
751 // For example, X - (X - Y) -> Y.
753 if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
754 // See if "V === Z - X" simplifies.
755 if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
756 // It does! Now see if "V + Y" simplifies.
757 if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
758 // It does, we successfully reassociated!
763 // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
764 if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
765 match(Op1, m_Trunc(m_Value(Y))))
766 if (X->getType() == Y->getType())
767 // See if "V === X - Y" simplifies.
768 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
769 // It does! Now see if "trunc V" simplifies.
770 if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
772 // It does, return the simplified "trunc V".
775 // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
776 if (match(Op0, m_PtrToInt(m_Value(X))) &&
777 match(Op1, m_PtrToInt(m_Value(Y))))
778 if (Constant *Result = computePointerDifference(Q.DL, X, Y))
779 return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
782 if (MaxRecurse && Op0->getType()->isIntegerTy(1))
783 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
786 // Threading Sub over selects and phi nodes is pointless, so don't bother.
787 // Threading over the select in "A - select(cond, B, C)" means evaluating
788 // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
789 // only if B and C are equal. If B and C are equal then (since we assume
790 // that operands have already been simplified) "select(cond, B, C)" should
791 // have been simplified to the common value of B and C already. Analysing
792 // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
793 // for threading over phi nodes.
798 Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
799 const DataLayout &DL, const TargetLibraryInfo *TLI,
800 const DominatorTree *DT, AssumptionCache *AC,
801 const Instruction *CxtI) {
802 return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
806 /// Given operands for an FAdd, see if we can fold the result. If not, this
808 static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
809 const Query &Q, unsigned MaxRecurse) {
810 if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
811 if (Constant *CRHS = dyn_cast<Constant>(Op1))
812 return ConstantFoldBinaryOpOperands(Instruction::FAdd, CLHS, CRHS, Q.DL);
814 // Canonicalize the constant to the RHS.
  // fadd X, -0 ==> X
  if (match(Op1, m_NegZero()))
    return Op0;

  // fadd X, 0 ==> X, when we know X is not -0
  if (match(Op1, m_Zero()) &&
      (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
    return Op0;
827 // fadd [nnan ninf] X, (fsub [nnan ninf] 0, X) ==> 0
828 // where nnan and ninf have to occur at least once somewhere in this
830 Value *SubOp = nullptr;
831 if (match(Op1, m_FSub(m_AnyZero(), m_Specific(Op0))))
833 else if (match(Op0, m_FSub(m_AnyZero(), m_Specific(Op1))))
836 Instruction *FSub = cast<Instruction>(SubOp);
837 if ((FMF.noNaNs() || FSub->hasNoNaNs()) &&
838 (FMF.noInfs() || FSub->hasNoInfs()))
839 return Constant::getNullValue(Op0->getType());
845 /// Given operands for an FSub, see if we can fold the result. If not, this
847 static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
848 const Query &Q, unsigned MaxRecurse) {
849 if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
850 if (Constant *CRHS = dyn_cast<Constant>(Op1))
851 return ConstantFoldBinaryOpOperands(Instruction::FSub, CLHS, CRHS, Q.DL);
855 if (match(Op1, m_Zero()))
858 // fsub X, -0 ==> X, when we know X is not -0
859 if (match(Op1, m_NegZero()) &&
860 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
863 // fsub -0.0, (fsub -0.0, X) ==> X
865 if (match(Op0, m_NegZero()) && match(Op1, m_FSub(m_NegZero(), m_Value(X))))
868 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
869 if (FMF.noSignedZeros() && match(Op0, m_AnyZero()) &&
870 match(Op1, m_FSub(m_AnyZero(), m_Value(X))))
873 // fsub nnan x, x ==> 0.0
874 if (FMF.noNaNs() && Op0 == Op1)
875 return Constant::getNullValue(Op0->getType());
880 /// Given the operands for an FMul, see if we can fold the result
881 static Value *SimplifyFMulInst(Value *Op0, Value *Op1,
884 unsigned MaxRecurse) {
885 if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
886 if (Constant *CRHS = dyn_cast<Constant>(Op1))
887 return ConstantFoldBinaryOpOperands(Instruction::FMul, CLHS, CRHS, Q.DL);
889 // Canonicalize the constant to the RHS.
894 if (match(Op1, m_FPOne()))
897 // fmul nnan nsz X, 0 ==> 0
898 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZero()))
904 /// Given operands for a Mul, see if we can fold the result.
905 /// If not, this returns null.
906 static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
907 unsigned MaxRecurse) {
908 if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
909 if (Constant *CRHS = dyn_cast<Constant>(Op1))
910 return ConstantFoldBinaryOpOperands(Instruction::Mul, CLHS, CRHS, Q.DL);
912 // Canonicalize the constant to the RHS.
917 if (match(Op1, m_Undef()))
918 return Constant::getNullValue(Op0->getType());
921 if (match(Op1, m_Zero()))
925 if (match(Op1, m_One()))
928 // (X / Y) * Y -> X if the division is exact.
930 if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
931 match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0))))) // Y * (X / Y)
935 if (MaxRecurse && Op0->getType()->isIntegerTy(1))
936 if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
939 // Try some generic simplifications for associative operations.
940 if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
944 // Mul distributes over Add. Try some generic simplifications based on this.
945 if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
949 // If the operation is with the result of a select instruction, check whether
950 // operating on either branch of the select always yields the same value.
951 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
952 if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
956 // If the operation is with the result of a phi instruction, check whether
957 // operating on all incoming values of the phi always yields the same value.
958 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
959 if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
966 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
967 const DataLayout &DL,
968 const TargetLibraryInfo *TLI,
969 const DominatorTree *DT, AssumptionCache *AC,
970 const Instruction *CxtI) {
971 return ::SimplifyFAddInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
975 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
976 const DataLayout &DL,
977 const TargetLibraryInfo *TLI,
978 const DominatorTree *DT, AssumptionCache *AC,
979 const Instruction *CxtI) {
980 return ::SimplifyFSubInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
984 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
985 const DataLayout &DL,
986 const TargetLibraryInfo *TLI,
987 const DominatorTree *DT, AssumptionCache *AC,
988 const Instruction *CxtI) {
989 return ::SimplifyFMulInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
993 Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout &DL,
994 const TargetLibraryInfo *TLI,
995 const DominatorTree *DT, AssumptionCache *AC,
996 const Instruction *CxtI) {
997 return ::SimplifyMulInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
1001 /// Given operands for an SDiv or UDiv, see if we can fold the result.
1002 /// If not, this returns null.
1003 static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1004 const Query &Q, unsigned MaxRecurse) {
1005 if (Constant *C0 = dyn_cast<Constant>(Op0))
1006 if (Constant *C1 = dyn_cast<Constant>(Op1))
1007 return ConstantFoldBinaryOpOperands(Opcode, C0, C1, Q.DL);
  bool isSigned = Opcode == Instruction::SDiv;

  // X / undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // X / 0 -> undef, we don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return UndefValue::get(Op1->getType());

  // undef / X -> 0
  if (match(Op0, m_Undef()))
    return Constant::getNullValue(Op0->getType());

  // 0 / X -> 0, we don't need to preserve faults!
  if (match(Op0, m_Zero()))
    return Op0;

  // X / 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  if (Op0->getType()->isIntegerTy(1))
    // It can't be division by zero, hence it must be division by one.
    return Op0;

  // X / X -> 1
  if (Op0 == Op1)
    return ConstantInt::get(Op0->getType(), 1);
1039 // (X * Y) / Y -> X if the multiplication does not overflow.
1040 Value *X = nullptr, *Y = nullptr;
1041 if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
1042 if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
1043 OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0);
1044 // If the Mul knows it does not overflow, then we are good to go.
1045 if ((isSigned && Mul->hasNoSignedWrap()) ||
1046 (!isSigned && Mul->hasNoUnsignedWrap()))
1048 // If X has the form X = A / Y then X * Y cannot overflow.
1049 if (BinaryOperator *Div = dyn_cast<BinaryOperator>(X))
1050 if (Div->getOpcode() == Opcode && Div->getOperand(1) == Y)
1054 // (X rem Y) / Y -> 0
1055 if ((isSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1056 (!isSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1057 return Constant::getNullValue(Op0->getType());
1059 // (X /u C1) /u C2 -> 0 if C1 * C2 overflow
1060 ConstantInt *C1, *C2;
1061 if (!isSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
1062 match(Op1, m_ConstantInt(C2))) {
1064 C1->getValue().umul_ov(C2->getValue(), Overflow);
1066 return Constant::getNullValue(Op0->getType());
1069 // If the operation is with the result of a select instruction, check whether
1070 // operating on either branch of the select always yields the same value.
1071 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1072 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1075 // If the operation is with the result of a phi instruction, check whether
1076 // operating on all incoming values of the phi always yields the same value.
1077 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1078 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1084 /// Given operands for an SDiv, see if we can fold the result.
1085 /// If not, this returns null.
1086 static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q,
1087 unsigned MaxRecurse) {
1088 if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse))
1094 Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
1095 const TargetLibraryInfo *TLI,
1096 const DominatorTree *DT, AssumptionCache *AC,
1097 const Instruction *CxtI) {
1098 return ::SimplifySDivInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
1102 /// Given operands for a UDiv, see if we can fold the result.
1103 /// If not, this returns null.
1104 static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q,
1105 unsigned MaxRecurse) {
1106 if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse))
1112 Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
1113 const TargetLibraryInfo *TLI,
1114 const DominatorTree *DT, AssumptionCache *AC,
1115 const Instruction *CxtI) {
1116 return ::SimplifyUDivInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
1120 static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
1121 const Query &Q, unsigned) {
1122 // undef / X -> undef (the undef could be a snan).
1123 if (match(Op0, m_Undef()))
1126 // X / undef -> undef
1127 if (match(Op1, m_Undef()))
1131 if (match(Op1, m_FPOne()))
1135 // Requires that NaNs are off (X could be zero) and signed zeroes are
1136 // ignored (X could be positive or negative, so the output sign is unknown).
  if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
    return Op0;

  // X / X -> 1.0 is legal when NaNs are ignored.
  if (FMF.noNaNs() && Op0 == Op1)
    return ConstantFP::get(Op0->getType(), 1.0);
1145 // -X / X -> -1.0 and
1146 // X / -X -> -1.0 are legal when NaNs are ignored.
1147 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
1148 if ((BinaryOperator::isFNeg(Op0, /*IgnoreZeroSign=*/true) &&
1149 BinaryOperator::getFNegArgument(Op0) == Op1) ||
1150 (BinaryOperator::isFNeg(Op1, /*IgnoreZeroSign=*/true) &&
1151 BinaryOperator::getFNegArgument(Op1) == Op0))
1152 return ConstantFP::get(Op0->getType(), -1.0);
1158 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
1159 const DataLayout &DL,
1160 const TargetLibraryInfo *TLI,
1161 const DominatorTree *DT, AssumptionCache *AC,
1162 const Instruction *CxtI) {
1163 return ::SimplifyFDivInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
1167 /// Given operands for an SRem or URem, see if we can fold the result.
1168 /// If not, this returns null.
1169 static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1170 const Query &Q, unsigned MaxRecurse) {
1171 if (Constant *C0 = dyn_cast<Constant>(Op0))
1172 if (Constant *C1 = dyn_cast<Constant>(Op1))
1173 return ConstantFoldBinaryOpOperands(Opcode, C0, C1, Q.DL);
1175 // X % undef -> undef
1176 if (match(Op1, m_Undef()))
1180 if (match(Op0, m_Undef()))
1181 return Constant::getNullValue(Op0->getType());
1183 // 0 % X -> 0, we don't need to preserve faults!
1184 if (match(Op0, m_Zero()))
1187 // X % 0 -> undef, we don't need to preserve faults!
1188 if (match(Op1, m_Zero()))
1189 return UndefValue::get(Op0->getType());
1192 if (match(Op1, m_One()))
1193 return Constant::getNullValue(Op0->getType());
  if (Op0->getType()->isIntegerTy(1))
    // It can't be remainder by zero, hence it must be remainder by one.
    return Constant::getNullValue(Op0->getType());

  // X % X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());
1203 // (X % Y) % Y -> X % Y
1204 if ((Opcode == Instruction::SRem &&
1205 match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1206 (Opcode == Instruction::URem &&
1207 match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1210 // If the operation is with the result of a select instruction, check whether
1211 // operating on either branch of the select always yields the same value.
1212 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1213 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1216 // If the operation is with the result of a phi instruction, check whether
1217 // operating on all incoming values of the phi always yields the same value.
1218 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1219 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1225 /// Given operands for an SRem, see if we can fold the result.
1226 /// If not, this returns null.
1227 static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q,
1228 unsigned MaxRecurse) {
1229 if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse))
1235 Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout &DL,
1236 const TargetLibraryInfo *TLI,
1237 const DominatorTree *DT, AssumptionCache *AC,
1238 const Instruction *CxtI) {
1239 return ::SimplifySRemInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
1243 /// Given operands for a URem, see if we can fold the result.
1244 /// If not, this returns null.
1245 static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q,
1246 unsigned MaxRecurse) {
1247 if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse))
1253 Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout &DL,
1254 const TargetLibraryInfo *TLI,
1255 const DominatorTree *DT, AssumptionCache *AC,
1256 const Instruction *CxtI) {
1257 return ::SimplifyURemInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
1261 static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
1262 const Query &, unsigned) {
1263 // undef % X -> undef (the undef could be a snan).
1264 if (match(Op0, m_Undef()))
1267 // X % undef -> undef
1268 if (match(Op1, m_Undef()))
1272 // Requires that NaNs are off (X could be zero) and signed zeroes are
1273 // ignored (X could be positive or negative, so the output sign is unknown).
1274 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
1280 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
1281 const DataLayout &DL,
1282 const TargetLibraryInfo *TLI,
1283 const DominatorTree *DT, AssumptionCache *AC,
1284 const Instruction *CxtI) {
1285 return ::SimplifyFRemInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
1289 /// Returns true if a shift by \c Amount always yields undef.
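///
/// For example, "shl i32 %x, 32" shifts by the full bit width, so its result
/// is undefined and the whole shift folds to undef.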
1290 static bool isUndefShift(Value *Amount) {
1291 Constant *C = dyn_cast<Constant>(Amount);
1295 // X shift by undef -> undef because it may shift by the bitwidth.
1296 if (isa<UndefValue>(C))
1299 // Shifting by the bitwidth or more is undefined.
1300 if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
1301 if (CI->getValue().getLimitedValue() >=
1302 CI->getType()->getScalarSizeInBits())
1305 // If all lanes of a vector shift are undefined the whole shift is.
1306 if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
1307 for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
1308 if (!isUndefShift(C->getAggregateElement(I)))
1316 /// Given operands for an Shl, LShr or AShr, see if we can fold the result.
1317 /// If not, this returns null.
1318 static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
1319 const Query &Q, unsigned MaxRecurse) {
1320 if (Constant *C0 = dyn_cast<Constant>(Op0))
1321 if (Constant *C1 = dyn_cast<Constant>(Op1))
1322 return ConstantFoldBinaryOpOperands(Opcode, C0, C1, Q.DL);
1324 // 0 shift by X -> 0
1325 if (match(Op0, m_Zero()))
1328 // X shift by 0 -> X
1329 if (match(Op1, m_Zero()))
1332 // Fold undefined shifts.
1333 if (isUndefShift(Op1))
1334 return UndefValue::get(Op0->getType());
1336 // If the operation is with the result of a select instruction, check whether
1337 // operating on either branch of the select always yields the same value.
1338 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1339 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1342 // If the operation is with the result of a phi instruction, check whether
1343 // operating on all incoming values of the phi always yields the same value.
1344 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1345 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1348 // If any bits in the shift amount make that value greater than or equal to
1349 // the number of bits in the type, the shift is undefined.
1350 unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
1351 APInt KnownZero(BitWidth, 0);
1352 APInt KnownOne(BitWidth, 0);
1353 computeKnownBits(Op1, KnownZero, KnownOne, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1354 if (KnownOne.getLimitedValue() >= BitWidth)
1355 return UndefValue::get(Op0->getType());
1357 // If all valid bits in the shift amount are known zero, the first operand is
1359 unsigned NumValidShiftBits = Log2_32_Ceil(BitWidth);
1360 APInt ShiftAmountMask = APInt::getLowBitsSet(BitWidth, NumValidShiftBits);
1361 if ((KnownZero & ShiftAmountMask) == ShiftAmountMask)
/// \brief Given operands for an LShr or AShr, see if we can fold the result.
/// If not, this returns null.
1369 static Value *SimplifyRightShift(unsigned Opcode, Value *Op0, Value *Op1,
1370 bool isExact, const Query &Q,
1371 unsigned MaxRecurse) {
  if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // X >> X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // undef >> X -> undef (if it's exact)
  if (match(Op0, m_Undef()))
    return isExact ? Op0 : Constant::getNullValue(Op0->getType());

  // The low bit cannot be shifted out of an exact shift if it is set.
  if (isExact) {
    unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
    APInt Op0KnownZero(BitWidth, 0);
    APInt Op0KnownOne(BitWidth, 0);
    computeKnownBits(Op0, Op0KnownZero, Op0KnownOne, Q.DL, /*Depth=*/0, Q.AC,
                     Q.CxtI, Q.DT);
    if (Op0KnownOne[0])
      return Op0;
  }
1398 /// Given operands for an Shl, see if we can fold the result.
1399 /// If not, this returns null.
1400 static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1401 const Query &Q, unsigned MaxRecurse) {
1402 if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
  // undef << X -> undef if the shift is NSW/NUW, otherwise undef << X -> 0
1407 if (match(Op0, m_Undef()))
1408 return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());
1410 // (X >> A) << A -> X
1412 if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1417 Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1418 const DataLayout &DL, const TargetLibraryInfo *TLI,
1419 const DominatorTree *DT, AssumptionCache *AC,
1420 const Instruction *CxtI) {
1421 return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
1425 /// Given operands for an LShr, see if we can fold the result.
1426 /// If not, this returns null.
1427 static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1428 const Query &Q, unsigned MaxRecurse) {
1429 if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
1433 // (X << A) >> A -> X
1435 if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1441 Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1442 const DataLayout &DL,
1443 const TargetLibraryInfo *TLI,
1444 const DominatorTree *DT, AssumptionCache *AC,
1445 const Instruction *CxtI) {
1446 return ::SimplifyLShrInst(Op0, Op1, isExact, Query(DL, TLI, DT, AC, CxtI),
1450 /// Given operands for an AShr, see if we can fold the result.
1451 /// If not, this returns null.
1452 static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1453 const Query &Q, unsigned MaxRecurse) {
1454 if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
1458 // all ones >>a X -> all ones
1459 if (match(Op0, m_AllOnes()))
1462 // (X << A) >> A -> X
1464 if (match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1467 // Arithmetic shifting an all-sign-bit value is a no-op.
1468 unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1469 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1475 Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1476 const DataLayout &DL,
1477 const TargetLibraryInfo *TLI,
1478 const DominatorTree *DT, AssumptionCache *AC,
1479 const Instruction *CxtI) {
1480 return ::SimplifyAShrInst(Op0, Op1, isExact, Query(DL, TLI, DT, AC, CxtI),
1484 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1485 ICmpInst *UnsignedICmp, bool IsAnd) {
1488 ICmpInst::Predicate EqPred;
1489 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1490 !ICmpInst::isEquality(EqPred))
1493 ICmpInst::Predicate UnsignedPred;
1494 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1495 ICmpInst::isUnsigned(UnsignedPred))
1497 else if (match(UnsignedICmp,
1498 m_ICmp(UnsignedPred, m_Value(Y), m_Specific(X))) &&
1499 ICmpInst::isUnsigned(UnsignedPred))
1500 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1504 // X < Y && Y != 0 --> X < Y
1505 // X < Y || Y != 0 --> Y != 0
1506 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1507 return IsAnd ? UnsignedICmp : ZeroICmp;
1509 // X >= Y || Y != 0 --> true
1510 // X >= Y || Y == 0 --> X >= Y
1511 if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd) {
1512 if (EqPred == ICmpInst::ICMP_NE)
1513 return getTrue(UnsignedICmp->getType());
1514 return UnsignedICmp;
1517 // X < Y && Y == 0 --> false
1518 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1520 return getFalse(UnsignedICmp->getType());
1525 /// Commuted variants are assumed to be handled by calling this function again
1526 /// with the parameters swapped.
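///
/// For example, "(icmp slt %a, %b) & (icmp sgt %a, %b)" tests two disjoint
/// conditions on the same operands, so the 'and' folds to false.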
1527 static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1528 ICmpInst::Predicate Pred0, Pred1;
1530 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1531 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1534 // We have (icmp Pred0, A, B) & (icmp Pred1, A, B).
1535 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1536 // can eliminate Op1 from this 'and'.
1537 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1540 // Check for any combination of predicates that are guaranteed to be disjoint.
1541 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1542 (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) ||
1543 (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) ||
1544 (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT))
1545 return getFalse(Op0->getType());
1550 /// Commuted variants are assumed to be handled by calling this function again
1551 /// with the parameters swapped.
1552 static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
1553 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
1556 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
  // Look for this pattern: (icmp V, C0) & (icmp V, C1).
1560 Type *ITy = Op0->getType();
1561 ICmpInst::Predicate Pred0, Pred1;
1562 const APInt *C0, *C1;
1564 if (match(Op0, m_ICmp(Pred0, m_Value(V), m_APInt(C0))) &&
1565 match(Op1, m_ICmp(Pred1, m_Specific(V), m_APInt(C1)))) {
1566 // Make a constant range that's the intersection of the two icmp ranges.
1567 // If the intersection is empty, we know that the result is false.
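    // For example, "(icmp ult %v, 4) & (icmp ugt %v, 10)" allows the ranges
    // [0, 4) and [11, UINT_MAX], which do not intersect, so it folds to false.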
1568 auto Range0 = ConstantRange::makeAllowedICmpRegion(Pred0, *C0);
1569 auto Range1 = ConstantRange::makeAllowedICmpRegion(Pred1, *C1);
1570 if (Range0.intersectWith(Range1).isEmptySet())
1571 return getFalse(ITy);
1574 // (icmp (add V, C0), C1) & (icmp V, C0)
1575 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1578 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1581 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1582 if (AddInst->getOperand(1) != Op1->getOperand(1))
1585 bool isNSW = AddInst->hasNoSignedWrap();
1586 bool isNUW = AddInst->hasNoUnsignedWrap();
1588 const APInt Delta = *C1 - *C0;
1589 if (C0->isStrictlyPositive()) {
1591 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1592 return getFalse(ITy);
1593 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1594 return getFalse(ITy);
1597 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1598 return getFalse(ITy);
1599 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1600 return getFalse(ITy);
1603 if (C0->getBoolValue() && isNUW) {
1605 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1606 return getFalse(ITy);
1608 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1609 return getFalse(ITy);
1615 /// Given operands for an And, see if we can fold the result.
1616 /// If not, this returns null.
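///
/// For example, "and i32 %x, 0" folds to 0 and "and i32 %x, -1" folds to %x.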
1617 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
1618 unsigned MaxRecurse) {
1619 if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
1620 if (Constant *CRHS = dyn_cast<Constant>(Op1))
1621 return ConstantFoldBinaryOpOperands(Instruction::And, CLHS, CRHS, Q.DL);
1623 // Canonicalize the constant to the RHS.
1624 std::swap(Op0, Op1);
1628 if (match(Op1, m_Undef()))
1629 return Constant::getNullValue(Op0->getType());
1636 if (match(Op1, m_Zero()))
1640 if (match(Op1, m_AllOnes()))
1643 // A & ~A = ~A & A = 0
1644 if (match(Op0, m_Not(m_Specific(Op1))) ||
1645 match(Op1, m_Not(m_Specific(Op0))))
1646 return Constant::getNullValue(Op0->getType());
1649 Value *A = nullptr, *B = nullptr;
1650 if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
1651 (A == Op1 || B == Op1))
1655 if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
1656 (A == Op0 || B == Op0))
1659 // A & (-A) = A if A is a power of two or zero.
1660 if (match(Op0, m_Neg(m_Specific(Op1))) ||
1661 match(Op1, m_Neg(m_Specific(Op0)))) {
1662 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
1665 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
1670 if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
1671 if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
1672 if (Value *V = SimplifyAndOfICmps(ICILHS, ICIRHS))
1674 if (Value *V = SimplifyAndOfICmps(ICIRHS, ICILHS))
1679 // The compares may be hidden behind casts. Look through those and try the
1680 // same folds as above.
1681 auto *Cast0 = dyn_cast<CastInst>(Op0);
1682 auto *Cast1 = dyn_cast<CastInst>(Op1);
1683 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1684 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1685 auto *Cmp0 = dyn_cast<ICmpInst>(Cast0->getOperand(0));
1686 auto *Cmp1 = dyn_cast<ICmpInst>(Cast1->getOperand(0));
1688 Instruction::CastOps CastOpc = Cast0->getOpcode();
1689 Type *ResultType = Cast0->getType();
1690 if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp0, Cmp1)))
1691 return ConstantExpr::getCast(CastOpc, V, ResultType);
1692 if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp1, Cmp0)))
1693 return ConstantExpr::getCast(CastOpc, V, ResultType);
1697 // Try some generic simplifications for associative operations.
1698 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
1702 // And distributes over Or. Try some generic simplifications based on this.
1703 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or,
1707 // And distributes over Xor. Try some generic simplifications based on this.
1708 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor,
1712 // If the operation is with the result of a select instruction, check whether
1713 // operating on either branch of the select always yields the same value.
1714 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1715 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q,
1719 // If the operation is with the result of a phi instruction, check whether
1720 // operating on all incoming values of the phi always yields the same value.
1721 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1722 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q,
1729 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout &DL,
1730 const TargetLibraryInfo *TLI,
1731 const DominatorTree *DT, AssumptionCache *AC,
1732 const Instruction *CxtI) {
1733 return ::SimplifyAndInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
1737 /// Commuted variants are assumed to be handled by calling this function again
1738 /// with the parameters swapped.
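///
/// For example, "(icmp sle %a, %b) | (icmp sge %a, %b)" covers every possible
/// ordering of the same operands, so the 'or' folds to true.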
1739 static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1740 ICmpInst::Predicate Pred0, Pred1;
1742 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1743 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1746 // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
1747 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1748 // can eliminate Op0 from this 'or'.
1749 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1752 // Check for any combination of predicates that cover the entire range of possibilities.
1754 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1755 (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
1756 (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
1757 (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
1758 return getTrue(Op0->getType());
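// For example (illustrative IR), 'slt' and its inverse 'sge' cover every case:
//   %c0 = icmp slt i32 %a, %b
//   %c1 = icmp sge i32 %a, %b
//   %r  = or i1 %c0, %c1        ; folds to true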
1763 /// Commuted variants are assumed to be handled by calling this function again
1764 /// with the parameters swapped.
1765 static Value *SimplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
1766 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
1769 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
1772 // (icmp (add V, C0), C1) | (icmp V, C0)
1773 ICmpInst::Predicate Pred0, Pred1;
1774 const APInt *C0, *C1;
1776 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1779 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1782 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1783 if (AddInst->getOperand(1) != Op1->getOperand(1))
1786 Type *ITy = Op0->getType();
1787 bool isNSW = AddInst->hasNoSignedWrap();
1788 bool isNUW = AddInst->hasNoUnsignedWrap();
1790 const APInt Delta = *C1 - *C0;
1791 if (C0->isStrictlyPositive()) {
1793 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1794 return getTrue(ITy);
1795 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1796 return getTrue(ITy);
1799 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1800 return getTrue(ITy);
1801 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1802 return getTrue(ITy);
1805 if (C0->getBoolValue() && isNUW) {
1807 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1808 return getTrue(ITy);
1810 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1811 return getTrue(ITy);
1817 /// Given operands for an Or, see if we can fold the result.
1818 /// If not, this returns null.
1819 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
1820 unsigned MaxRecurse) {
1821 if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
1822 if (Constant *CRHS = dyn_cast<Constant>(Op1))
1823 return ConstantFoldBinaryOpOperands(Instruction::Or, CLHS, CRHS, Q.DL);
1825 // Canonicalize the constant to the RHS.
1826 std::swap(Op0, Op1);
1830 if (match(Op1, m_Undef()))
1831 return Constant::getAllOnesValue(Op0->getType());
1838 if (match(Op1, m_Zero()))
1842 if (match(Op1, m_AllOnes()))
1845 // A | ~A = ~A | A = -1
1846 if (match(Op0, m_Not(m_Specific(Op1))) ||
1847 match(Op1, m_Not(m_Specific(Op0))))
1848 return Constant::getAllOnesValue(Op0->getType());
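// For example (illustrative IR):
//   %notx = xor i32 %x, -1
//   %r    = or i32 %x, %notx    ; folds to -1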
1851 Value *A = nullptr, *B = nullptr;
1852 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1853 (A == Op1 || B == Op1))
1857 if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
1858 (A == Op0 || B == Op0))
1861 // ~(A & ?) | A = -1
1862 if (match(Op0, m_Not(m_And(m_Value(A), m_Value(B)))) &&
1863 (A == Op1 || B == Op1))
1864 return Constant::getAllOnesValue(Op1->getType());
1866 // A | ~(A & ?) = -1
1867 if (match(Op1, m_Not(m_And(m_Value(A), m_Value(B)))) &&
1868 (A == Op0 || B == Op0))
1869 return Constant::getAllOnesValue(Op0->getType());
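// For example (illustrative IR):
//   %a = and i32 %x, %y
//   %n = xor i32 %a, -1
//   %r = or i32 %x, %n          ; folds to -1, since X | ~(X & Y) covers all bits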
1871 if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
1872 if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
1873 if (Value *V = SimplifyOrOfICmps(ICILHS, ICIRHS))
1875 if (Value *V = SimplifyOrOfICmps(ICIRHS, ICILHS))
1880 // Try some generic simplifications for associative operations.
1881 if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
1885 // Or distributes over And. Try some generic simplifications based on this.
1886 if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q,
1890 // If the operation is with the result of a select instruction, check whether
1891 // operating on either branch of the select always yields the same value.
1892 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1893 if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
1898 Value *C = nullptr, *D = nullptr;
1899 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
1900 match(Op1, m_And(m_Value(B), m_Value(D)))) {
1901 ConstantInt *C1 = dyn_cast<ConstantInt>(C);
1902 ConstantInt *C2 = dyn_cast<ConstantInt>(D);
1903 if (C1 && C2 && (C1->getValue() == ~C2->getValue())) {
1904 // (A & C1)|(B & C2)
1905 // If we have: ((V + N) & C1) | (V & C2)
1906 // .. and C2 = ~C1 and C2 is a low-bit mask (of the form 0+1+) and (N & C2) == 0
1907 // replace with V+N.
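// For example (illustrative, with C1 = -256, C2 = 255 and N = 256):
//   %s = add i32 %v, 256
//   %h = and i32 %s, -256       ; (V + N) & C1
//   %l = and i32 %v, 255        ; V & C2
//   %r = or i32 %h, %l          ; folds to %s, i.e. V + N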
1909 if ((C2->getValue() & (C2->getValue() + 1)) == 0 && // C2 == 0+1+
1910 match(A, m_Add(m_Value(V1), m_Value(V2)))) {
1911 // Add commutes, try both ways.
1913 MaskedValueIsZero(V2, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1916 MaskedValueIsZero(V1, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1919 // Or commutes, try both ways.
1920 if ((C1->getValue() & (C1->getValue() + 1)) == 0 &&
1921 match(B, m_Add(m_Value(V1), m_Value(V2)))) {
1922 // Add commutes, try both ways.
1924 MaskedValueIsZero(V2, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1927 MaskedValueIsZero(V1, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1933 // If the operation is with the result of a phi instruction, check whether
1934 // operating on all incoming values of the phi always yields the same value.
1935 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1936 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
1942 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout &DL,
1943 const TargetLibraryInfo *TLI,
1944 const DominatorTree *DT, AssumptionCache *AC,
1945 const Instruction *CxtI) {
1946 return ::SimplifyOrInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
1950 /// Given operands for a Xor, see if we can fold the result.
1951 /// If not, this returns null.
1952 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
1953 unsigned MaxRecurse) {
1954 if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
1955 if (Constant *CRHS = dyn_cast<Constant>(Op1))
1956 return ConstantFoldBinaryOpOperands(Instruction::Xor, CLHS, CRHS, Q.DL);
1958 // Canonicalize the constant to the RHS.
1959 std::swap(Op0, Op1);
1962 // A ^ undef -> undef
1963 if (match(Op1, m_Undef()))
1967 if (match(Op1, m_Zero()))
1972 return Constant::getNullValue(Op0->getType());
1974 // A ^ ~A = ~A ^ A = -1
1975 if (match(Op0, m_Not(m_Specific(Op1))) ||
1976 match(Op1, m_Not(m_Specific(Op0))))
1977 return Constant::getAllOnesValue(Op0->getType());
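// For example (illustrative IR):
//   %notx = xor i32 %x, -1
//   %r    = xor i32 %x, %notx   ; folds to -1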
1979 // Try some generic simplifications for associative operations.
1980 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q,
1984 // Threading Xor over selects and phi nodes is pointless, so don't bother.
1985 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
1986 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
1987 // only if B and C are equal. If B and C are equal then (since we assume
1988 // that operands have already been simplified) "select(cond, B, C)" should
1989 // have been simplified to the common value of B and C already. Analysing
1990 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
1991 // for threading over phi nodes.
1996 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout &DL,
1997 const TargetLibraryInfo *TLI,
1998 const DominatorTree *DT, AssumptionCache *AC,
1999 const Instruction *CxtI) {
2000 return ::SimplifyXorInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
2004 static Type *GetCompareTy(Value *Op) {
2005 return CmpInst::makeCmpResultType(Op->getType());
2008 /// Rummage around inside V looking for something equivalent to the comparison
2009 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
2010 /// Helper function for analyzing max/min idioms.
2011 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2012 Value *LHS, Value *RHS) {
2013 SelectInst *SI = dyn_cast<SelectInst>(V);
2016 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2019 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2020 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2022 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2023 LHS == CmpRHS && RHS == CmpLHS)
2028 // A significant optimization not implemented here is assuming that alloca
2029 // addresses are not equal to incoming argument values. They don't *alias*,
2030 // as we say, but that doesn't mean they aren't equal, so we take a
2031 // conservative approach.
2033 // This is inspired in part by C++11 5.10p1:
2034 // "Two pointers of the same type compare equal if and only if they are both
2035 // null, both point to the same function, or both represent the same address."
2038 // This is pretty permissive.
2040 // It's also partly due to C11 6.5.9p6:
2041 // "Two pointers compare equal if and only if both are null pointers, both are
2042 // pointers to the same object (including a pointer to an object and a
2043 // subobject at its beginning) or function, both are pointers to one past the
2044 // last element of the same array object, or one is a pointer to one past the
2045 // end of one array object and the other is a pointer to the start of a
2046 // different array object that happens to immediately follow the first array
2047 // object in the address space."
2049 // C11's version is more restrictive; however, there's no reason why an argument
2050 // couldn't be a one-past-the-end value for a stack object in the caller and be
2051 // equal to the beginning of a stack object in the callee.
2053 // If the C and C++ standards are ever made sufficiently restrictive in this
2054 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2055 // this optimization.
2057 computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
2058 const DominatorTree *DT, CmpInst::Predicate Pred,
2059 const Instruction *CxtI, Value *LHS, Value *RHS) {
2060 // First, skip past any trivial no-ops.
2061 LHS = LHS->stripPointerCasts();
2062 RHS = RHS->stripPointerCasts();
2064 // A non-null pointer is not equal to a null pointer.
2065 if (llvm::isKnownNonNull(LHS) && isa<ConstantPointerNull>(RHS) &&
2066 (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
2067 return ConstantInt::get(GetCompareTy(LHS),
2068 !CmpInst::isTrueWhenEqual(Pred));
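// For example (illustrative IR), an alloca in the default address space is
// known to be non-null, so:
//   %p = alloca i32
//   %c = icmp eq i32* %p, null  ; folds to false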
2070 // We can only fold certain predicates on pointer comparisons.
2075 // Equality comparisons are easy to fold.
2076 case CmpInst::ICMP_EQ:
2077 case CmpInst::ICMP_NE:
2080 // We can only handle unsigned relational comparisons because 'inbounds' on
2081 // a GEP only protects against unsigned wrapping.
2082 case CmpInst::ICMP_UGT:
2083 case CmpInst::ICMP_UGE:
2084 case CmpInst::ICMP_ULT:
2085 case CmpInst::ICMP_ULE:
2086 // However, we have to switch them to their signed variants to handle
2087 // negative indices from the base pointer.
2088 Pred = ICmpInst::getSignedPredicate(Pred);
2092 // Strip off any constant offsets so that we can reason about them.
2093 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2094 // here and compare base addresses like AliasAnalysis does, however there are
2095 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2096 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2097 // doesn't need to guarantee pointer inequality when it says NoAlias.
2098 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
2099 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
2101 // If LHS and RHS are related via constant offsets to the same base
2102 // value, we can replace it with an icmp which just compares the offsets.
2104 return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
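// For example (illustrative IR), both pointers below share the base %base and
// have constant offsets 4 and 8, so the compare reduces to 'icmp eq 4, 8':
//   %p1 = getelementptr i8, i8* %base, i64 4
//   %p2 = getelementptr i8, i8* %base, i64 8
//   %c  = icmp eq i8* %p1, %p2  ; folds to false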
2106 // Various optimizations for (in)equality comparisons.
2107 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2108 // Different non-empty allocations that exist at the same time have
2109 // different addresses (if the program can tell). Global variables always
2110 // exist, so they always exist during the lifetime of each other and all
2111 // allocas. Two different allocas usually have different addresses...
2113 // However, if there's an @llvm.stackrestore dynamically in between two
2114 // allocas, they may have the same address. It's tempting to reduce the
2115 // scope of the problem by only looking at *static* allocas here. That would
2116 // cover the majority of allocas while significantly reducing the likelihood
2117 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2118 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2119 // an entry block. Also, if we have a block that's not attached to a
2120 // function, we can't tell if it's "static" under the current definition.
2121 // Theoretically, this problem could be fixed by creating a new kind of
2122 // instruction specifically for static allocas. Such a new instruction
2123 // could be required to be at the top of the entry block, thus preventing it
2124 // from being subject to a @llvm.stackrestore. Instcombine could even
2125 // convert regular allocas into these special allocas. It'd be nifty.
2126 // However, until then, this problem remains open.
2128 // So, we'll assume that two non-empty allocas have different addresses
2131 // With all that, if the offsets are within the bounds of their allocations
2132 // (and not one-past-the-end! so we can't use inbounds!), and their
2133 // allocations aren't the same, the pointers are not equal.
2135 // Note that it's not necessary to check for LHS being a global variable
2136 // address, due to canonicalization and constant folding.
2137 if (isa<AllocaInst>(LHS) &&
2138 (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2139 ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
2140 ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
2141 uint64_t LHSSize, RHSSize;
2142 if (LHSOffsetCI && RHSOffsetCI &&
2143 getObjectSize(LHS, LHSSize, DL, TLI) &&
2144 getObjectSize(RHS, RHSSize, DL, TLI)) {
2145 const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
2146 const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2147 if (!LHSOffsetValue.isNegative() &&
2148 !RHSOffsetValue.isNegative() &&
2149 LHSOffsetValue.ult(LHSSize) &&
2150 RHSOffsetValue.ult(RHSSize)) {
2151 return ConstantInt::get(GetCompareTy(LHS),
2152 !CmpInst::isTrueWhenEqual(Pred));
2156 // Repeat the above check but this time without depending on DataLayout
2157 // or being able to compute a precise size.
2158 if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2159 !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2160 LHSOffset->isNullValue() &&
2161 RHSOffset->isNullValue())
2162 return ConstantInt::get(GetCompareTy(LHS),
2163 !CmpInst::isTrueWhenEqual(Pred));
2166 // Even if a non-inbounds GEP occurs along the path we can still optimize
2167 // equality comparisons concerning the result. We avoid walking the whole
2168 // chain again by starting where the last calls to
2169 // stripAndComputeConstantOffsets left off and accumulating the offsets from there.
2170 Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
2171 Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
2173 return ConstantExpr::getICmp(Pred,
2174 ConstantExpr::getAdd(LHSOffset, LHSNoBound),
2175 ConstantExpr::getAdd(RHSOffset, RHSNoBound));
2177 // If one side of the equality comparison must come from a noalias call
2178 // (meaning a system memory allocation function), and the other side must
2179 // come from a pointer that cannot overlap with dynamically-allocated
2180 // memory within the lifetime of the current function (allocas, byval
2181 // arguments, globals), then determine the comparison result here.
2182 SmallVector<Value *, 8> LHSUObjs, RHSUObjs;
2183 GetUnderlyingObjects(LHS, LHSUObjs, DL);
2184 GetUnderlyingObjects(RHS, RHSUObjs, DL);
2186 // Is the set of underlying objects all noalias calls?
2187 auto IsNAC = [](ArrayRef<Value *> Objects) {
2188 return all_of(Objects, isNoAliasCall);
2191 // Is the set of underlying objects all things which must be disjoint from
2192 // noalias calls. For allocas, we consider only static ones (dynamic
2193 // allocas might be transformed into calls to malloc not simultaneously
2194 // live with the compared-to allocation). For globals, we exclude symbols
2195 // that might be resolved lazily to symbols in another dynamically-loaded
2196 // library (and, thus, could be malloc'ed by the implementation).
2197 auto IsAllocDisjoint = [](ArrayRef<Value *> Objects) {
2198 return all_of(Objects, [](Value *V) {
2199 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2200 return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2201 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2202 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2203 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2204 !GV->isThreadLocal();
2205 if (const Argument *A = dyn_cast<Argument>(V))
2206 return A->hasByValAttr();
2211 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2212 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2213 return ConstantInt::get(GetCompareTy(LHS),
2214 !CmpInst::isTrueWhenEqual(Pred));
2216 // Fold comparisons for a non-escaping pointer even if the allocation call
2217 // cannot be elided. We cannot fold malloc comparison to null. Also, the
2218 // dynamic allocation call could be either of the operands.
2219 Value *MI = nullptr;
2220 if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT))
2222 else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonNullAt(LHS, CxtI, DT))
2224 // FIXME: We should also fold the compare when the pointer escapes, but the
2225 // compare dominates the pointer escape
2226 if (MI && !PointerMayBeCaptured(MI, true, true))
2227 return ConstantInt::get(GetCompareTy(LHS),
2228 CmpInst::isFalseWhenEqual(Pred));
2235 /// Fold an icmp when its operands have i1 scalar type.
2236 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2237 Value *RHS, const Query &Q) {
2238 Type *ITy = GetCompareTy(LHS); // The return type.
2239 Type *OpTy = LHS->getType(); // The operand type.
2240 if (!OpTy->getScalarType()->isIntegerTy(1))
2246 case ICmpInst::ICMP_EQ:
2248 if (match(RHS, m_One()))
2251 case ICmpInst::ICMP_NE:
2253 if (match(RHS, m_Zero()))
2256 case ICmpInst::ICMP_UGT:
2258 if (match(RHS, m_Zero()))
2261 case ICmpInst::ICMP_UGE:
2263 if (match(RHS, m_One()))
2265 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false))
2266 return getTrue(ITy);
2268 case ICmpInst::ICMP_SGE:
2269 /// For signed comparison, the values for an i1 are 0 and -1
2270 /// respectively. This maps into a truth table of:
2271 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2272 /// 0 | 0 | 1 (0 >= 0) | 1
2273 /// 0 | 1 | 1 (0 >= -1) | 1
2274 /// 1 | 0 | 0 (-1 >= 0) | 0
2275 /// 1 | 1 | 1 (-1 >= -1) | 1
2276 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2277 return getTrue(ITy);
2279 case ICmpInst::ICMP_SLT:
2281 if (match(RHS, m_Zero()))
2284 case ICmpInst::ICMP_SLE:
2286 if (match(RHS, m_One()))
2289 case ICmpInst::ICMP_ULE:
2290 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2291 return getTrue(ITy);
2298 /// Try hard to fold icmp with zero RHS because this is a common case.
2299 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2300 Value *RHS, const Query &Q) {
2301 if (!match(RHS, m_Zero()))
2304 Type *ITy = GetCompareTy(LHS); // The return type.
2305 bool LHSKnownNonNegative, LHSKnownNegative;
2308 llvm_unreachable("Unknown ICmp predicate!");
2309 case ICmpInst::ICMP_ULT:
2310 return getFalse(ITy);
2311 case ICmpInst::ICMP_UGE:
2312 return getTrue(ITy);
2313 case ICmpInst::ICMP_EQ:
2314 case ICmpInst::ICMP_ULE:
2315 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2316 return getFalse(ITy);
2318 case ICmpInst::ICMP_NE:
2319 case ICmpInst::ICMP_UGT:
2320 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2321 return getTrue(ITy);
2323 case ICmpInst::ICMP_SLT:
2324 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
2326 if (LHSKnownNegative)
2327 return getTrue(ITy);
2328 if (LHSKnownNonNegative)
2329 return getFalse(ITy);
2331 case ICmpInst::ICMP_SLE:
2332 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
2334 if (LHSKnownNegative)
2335 return getTrue(ITy);
2336 if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2337 return getFalse(ITy);
2339 case ICmpInst::ICMP_SGE:
2340 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
2342 if (LHSKnownNegative)
2343 return getFalse(ITy);
2344 if (LHSKnownNonNegative)
2345 return getTrue(ITy);
2347 case ICmpInst::ICMP_SGT:
2348 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
2350 if (LHSKnownNegative)
2351 return getFalse(ITy);
2352 if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2353 return getTrue(ITy);
2360 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
2363 if (!match(RHS, m_APInt(C)))
2366 // Rule out tautological comparisons (e.g., ult 0 or uge 0).
2367 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
2368 if (RHS_CR.isEmptySet())
2369 return ConstantInt::getFalse(GetCompareTy(RHS));
2370 if (RHS_CR.isFullSet())
2371 return ConstantInt::getTrue(GetCompareTy(RHS));
2373 // Many binary operators with a constant RHS have an easy-to-compute constant
2374 // range. Use it to check whether the comparison is a tautology.
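// For example (illustrative IR), 'urem i8 %x, 4' always lies in [0, 4), so:
//   %r = urem i8 %x, 4
//   %c = icmp ult i8 %r, 8      ; folds to true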
2375 unsigned Width = C->getBitWidth();
2376 APInt Lower = APInt(Width, 0);
2377 APInt Upper = APInt(Width, 0);
2379 if (match(LHS, m_URem(m_Value(), m_APInt(C2)))) {
2380 // 'urem x, C2' produces [0, C2).
2382 } else if (match(LHS, m_SRem(m_Value(), m_APInt(C2)))) {
2383 // 'srem x, C2' produces (-|C2|, |C2|).
2385 Lower = (-Upper) + 1;
2386 } else if (match(LHS, m_UDiv(m_APInt(C2), m_Value()))) {
2387 // 'udiv C2, x' produces [0, C2].
2389 } else if (match(LHS, m_UDiv(m_Value(), m_APInt(C2)))) {
2390 // 'udiv x, C2' produces [0, UINT_MAX / C2].
2391 APInt NegOne = APInt::getAllOnesValue(Width);
2393 Upper = NegOne.udiv(*C2) + 1;
2394 } else if (match(LHS, m_SDiv(m_APInt(C2), m_Value()))) {
2395 if (C2->isMinSignedValue()) {
2396 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
2398 Upper = Lower.lshr(1) + 1;
2400 // 'sdiv C2, x' produces [-|C2|, |C2|].
2401 Upper = C2->abs() + 1;
2402 Lower = (-Upper) + 1;
2404 } else if (match(LHS, m_SDiv(m_Value(), m_APInt(C2)))) {
2405 APInt IntMin = APInt::getSignedMinValue(Width);
2406 APInt IntMax = APInt::getSignedMaxValue(Width);
2407 if (C2->isAllOnesValue()) {
2408 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
2409 // (INT_MIN sdiv -1 overflows, so INT_MIN is excluded from the range)
2412 } else if (C2->countLeadingZeros() < Width - 1) {
2413 // 'sdiv x, C2' produces [INT_MIN / C2, INT_MAX / C2]
2414 // where C2 != -1 and C2 != 0 and C2 != 1
2415 Lower = IntMin.sdiv(*C2);
2416 Upper = IntMax.sdiv(*C2);
2417 if (Lower.sgt(Upper))
2418 std::swap(Lower, Upper);
2420 assert(Upper != Lower && "Upper part of range has wrapped!");
2422 } else if (match(LHS, m_NUWShl(m_APInt(C2), m_Value()))) {
2423 // 'shl nuw C2, x' produces [C2, C2 << CLZ(C2)]
2425 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
2426 } else if (match(LHS, m_NSWShl(m_APInt(C2), m_Value()))) {
2427 if (C2->isNegative()) {
2428 // 'shl nsw C2, x' produces [C2 << CLO(C2)-1, C2]
2429 unsigned ShiftAmount = C2->countLeadingOnes() - 1;
2430 Lower = C2->shl(ShiftAmount);
2433 // 'shl nsw C2, x' produces [C2, C2 << CLZ(C2)-1]
2434 unsigned ShiftAmount = C2->countLeadingZeros() - 1;
2436 Upper = C2->shl(ShiftAmount) + 1;
2438 } else if (match(LHS, m_LShr(m_Value(), m_APInt(C2)))) {
2439 // 'lshr x, C2' produces [0, UINT_MAX >> C2].
2440 APInt NegOne = APInt::getAllOnesValue(Width);
2442 Upper = NegOne.lshr(*C2) + 1;
2443 } else if (match(LHS, m_LShr(m_APInt(C2), m_Value()))) {
2444 // 'lshr C2, x' produces [C2 >> (Width-1), C2].
2445 unsigned ShiftAmount = Width - 1;
2446 if (*C2 != 0 && cast<BinaryOperator>(LHS)->isExact())
2447 ShiftAmount = C2->countTrailingZeros();
2448 Lower = C2->lshr(ShiftAmount);
2450 } else if (match(LHS, m_AShr(m_Value(), m_APInt(C2)))) {
2451 // 'ashr x, C2' produces [INT_MIN >> C2, INT_MAX >> C2].
2452 APInt IntMin = APInt::getSignedMinValue(Width);
2453 APInt IntMax = APInt::getSignedMaxValue(Width);
2454 if (C2->ult(Width)) {
2455 Lower = IntMin.ashr(*C2);
2456 Upper = IntMax.ashr(*C2) + 1;
2458 } else if (match(LHS, m_AShr(m_APInt(C2), m_Value()))) {
2459 unsigned ShiftAmount = Width - 1;
2460 if (*C2 != 0 && cast<BinaryOperator>(LHS)->isExact())
2461 ShiftAmount = C2->countTrailingZeros();
2462 if (C2->isNegative()) {
2463 // 'ashr C2, x' produces [C2, C2 >> (Width-1)]
2465 Upper = C2->ashr(ShiftAmount) + 1;
2467 // 'ashr C2, x' produces [C2 >> (Width-1), C2]
2468 Lower = C2->ashr(ShiftAmount);
2471 } else if (match(LHS, m_Or(m_Value(), m_APInt(C2)))) {
2472 // 'or x, C2' produces [C2, UINT_MAX].
2474 } else if (match(LHS, m_And(m_Value(), m_APInt(C2)))) {
2475 // 'and x, C2' produces [0, C2].
2477 } else if (match(LHS, m_NUWAdd(m_Value(), m_APInt(C2)))) {
2478 // 'add nuw x, C2' produces [C2, UINT_MAX].
2482 ConstantRange LHS_CR =
2483 Lower != Upper ? ConstantRange(Lower, Upper) : ConstantRange(Width, true);
2485 if (auto *I = dyn_cast<Instruction>(LHS))
2486 if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
2487 LHS_CR = LHS_CR.intersectWith(getConstantRangeFromMetadata(*Ranges));
2489 if (!LHS_CR.isFullSet()) {
2490 if (RHS_CR.contains(LHS_CR))
2491 return ConstantInt::getTrue(GetCompareTy(RHS));
2492 if (RHS_CR.inverse().contains(LHS_CR))
2493 return ConstantInt::getFalse(GetCompareTy(RHS));
2499 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
2500 Value *RHS, const Query &Q,
2501 unsigned MaxRecurse) {
2502 Type *ITy = GetCompareTy(LHS); // The return type.
2504 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
2505 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
2506 if (MaxRecurse && (LBO || RBO)) {
2507 // Analyze the case when either LHS or RHS is an add instruction.
2508 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2509 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
2510 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
2511 if (LBO && LBO->getOpcode() == Instruction::Add) {
2512 A = LBO->getOperand(0);
2513 B = LBO->getOperand(1);
2515 ICmpInst::isEquality(Pred) ||
2516 (CmpInst::isUnsigned(Pred) && LBO->hasNoUnsignedWrap()) ||
2517 (CmpInst::isSigned(Pred) && LBO->hasNoSignedWrap());
2519 if (RBO && RBO->getOpcode() == Instruction::Add) {
2520 C = RBO->getOperand(0);
2521 D = RBO->getOperand(1);
2523 ICmpInst::isEquality(Pred) ||
2524 (CmpInst::isUnsigned(Pred) && RBO->hasNoUnsignedWrap()) ||
2525 (CmpInst::isSigned(Pred) && RBO->hasNoSignedWrap());
2528 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
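// For example (illustrative IR), the 'nuw' add below cannot wrap, so the
// compare is analyzed as 'icmp ult i32 1, 0' and folds to false:
//   %s = add nuw i32 %x, 1
//   %c = icmp ult i32 %s, %x    ; folds to false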
2529 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
2530 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
2531 Constant::getNullValue(RHS->getType()), Q,
2535 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
2536 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
2538 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
2539 C == LHS ? D : C, Q, MaxRecurse - 1))
2542 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
2543 if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem &&
2545 // Determine Y and Z in the form icmp (X+Y), (X+Z).
2548 // C + B == C + D -> B == D
2551 } else if (A == D) {
2552 // D + B == C + D -> B == C
2555 } else if (B == C) {
2556 // A + C == C + D -> A == D
2561 // A + D == C + D -> A == C
2565 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
2572 // icmp pred (or X, Y), X
2573 if (LBO && match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
2574 if (Pred == ICmpInst::ICMP_ULT)
2575 return getFalse(ITy);
2576 if (Pred == ICmpInst::ICMP_UGE)
2577 return getTrue(ITy);
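// For example (illustrative IR), X | Y is always unsigned-greater-or-equal
// to X, so:
//   %o = or i32 %x, %y
//   %c = icmp ult i32 %o, %x    ; folds to false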
2579 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
2580 bool RHSKnownNonNegative, RHSKnownNegative;
2581 bool YKnownNonNegative, YKnownNegative;
2582 ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, Q.DL, 0,
2583 Q.AC, Q.CxtI, Q.DT);
2584 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Q.DL, 0, Q.AC,
2586 if (RHSKnownNonNegative && YKnownNegative)
2587 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
2588 if (RHSKnownNegative || YKnownNonNegative)
2589 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
2592 // icmp pred X, (or X, Y)
2593 if (RBO && match(RBO, m_c_Or(m_Value(Y), m_Specific(LHS)))) {
2594 if (Pred == ICmpInst::ICMP_ULE)
2595 return getTrue(ITy);
2596 if (Pred == ICmpInst::ICMP_UGT)
2597 return getFalse(ITy);
2599 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE) {
2600 bool LHSKnownNonNegative, LHSKnownNegative;
2601 bool YKnownNonNegative, YKnownNegative;
2602 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0,
2603 Q.AC, Q.CxtI, Q.DT);
2604 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Q.DL, 0, Q.AC,
2606 if (LHSKnownNonNegative && YKnownNegative)
2607 return Pred == ICmpInst::ICMP_SGT ? getTrue(ITy) : getFalse(ITy);
2608 if (LHSKnownNegative || YKnownNonNegative)
2609 return Pred == ICmpInst::ICMP_SGT ? getFalse(ITy) : getTrue(ITy);
2614 // icmp pred (and X, Y), X
2615 if (LBO && match(LBO, m_CombineOr(m_And(m_Value(), m_Specific(RHS)),
2616 m_And(m_Specific(RHS), m_Value())))) {
2617 if (Pred == ICmpInst::ICMP_UGT)
2618 return getFalse(ITy);
2619 if (Pred == ICmpInst::ICMP_ULE)
2620 return getTrue(ITy);
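// For example (illustrative IR), X & Y is always unsigned-less-or-equal to X,
// so:
//   %a = and i32 %x, %y
//   %c = icmp ugt i32 %a, %x    ; folds to false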
2622 // icmp pred X, (and X, Y)
2623 if (RBO && match(RBO, m_CombineOr(m_And(m_Value(), m_Specific(LHS)),
2624 m_And(m_Specific(LHS), m_Value())))) {
2625 if (Pred == ICmpInst::ICMP_UGE)
2626 return getTrue(ITy);
2627 if (Pred == ICmpInst::ICMP_ULT)
2628 return getFalse(ITy);
2631 // 0 - (zext X) pred C
2632 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
2633 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
2634 if (RHSC->getValue().isStrictlyPositive()) {
2635 if (Pred == ICmpInst::ICMP_SLT)
2636 return ConstantInt::getTrue(RHSC->getContext());
2637 if (Pred == ICmpInst::ICMP_SGE)
2638 return ConstantInt::getFalse(RHSC->getContext());
2639 if (Pred == ICmpInst::ICMP_EQ)
2640 return ConstantInt::getFalse(RHSC->getContext());
2641 if (Pred == ICmpInst::ICMP_NE)
2642 return ConstantInt::getTrue(RHSC->getContext());
2644 if (RHSC->getValue().isNonNegative()) {
2645 if (Pred == ICmpInst::ICMP_SLE)
2646 return ConstantInt::getTrue(RHSC->getContext());
2647 if (Pred == ICmpInst::ICMP_SGT)
2648 return ConstantInt::getFalse(RHSC->getContext());
2653 // icmp pred (urem X, Y), Y
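// For example (illustrative IR), whenever the urem is defined its result is
// strictly less than %y, so:
//   %r = urem i32 %x, %y
//   %c = icmp ult i32 %r, %y    ; folds to true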
2654 if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
2655 bool KnownNonNegative, KnownNegative;
2659 case ICmpInst::ICMP_SGT:
2660 case ICmpInst::ICMP_SGE:
2661 ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
2663 if (!KnownNonNegative)
2666 case ICmpInst::ICMP_EQ:
2667 case ICmpInst::ICMP_UGT:
2668 case ICmpInst::ICMP_UGE:
2669 return getFalse(ITy);
2670 case ICmpInst::ICMP_SLT:
2671 case ICmpInst::ICMP_SLE:
2672 ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
2674 if (!KnownNonNegative)
2677 case ICmpInst::ICMP_NE:
2678 case ICmpInst::ICMP_ULT:
2679 case ICmpInst::ICMP_ULE:
2680 return getTrue(ITy);
2684 // icmp pred X, (urem Y, X)
2685 if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) {
2686 bool KnownNonNegative, KnownNegative;
2690 case ICmpInst::ICMP_SGT:
2691 case ICmpInst::ICMP_SGE:
2692 ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
2694 if (!KnownNonNegative)
2697 case ICmpInst::ICMP_NE:
2698 case ICmpInst::ICMP_UGT:
2699 case ICmpInst::ICMP_UGE:
2700 return getTrue(ITy);
2701 case ICmpInst::ICMP_SLT:
2702 case ICmpInst::ICMP_SLE:
2703 ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
2705 if (!KnownNonNegative)
2708 case ICmpInst::ICMP_EQ:
2709 case ICmpInst::ICMP_ULT:
2710 case ICmpInst::ICMP_ULE:
2711 return getFalse(ITy);
2717 if (LBO && (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
2718 match(LBO, m_UDiv(m_Specific(RHS), m_Value())))) {
2719 // icmp pred (X op Y), X
2720 if (Pred == ICmpInst::ICMP_UGT)
2721 return getFalse(ITy);
2722 if (Pred == ICmpInst::ICMP_ULE)
2723 return getTrue(ITy);
2728 if (RBO && (match(RBO, m_LShr(m_Specific(LHS), m_Value())) ||
2729 match(RBO, m_UDiv(m_Specific(LHS), m_Value())))) {
2730 // icmp pred X, (X op Y)
2731 if (Pred == ICmpInst::ICMP_ULT)
2732 return getFalse(ITy);
2733 if (Pred == ICmpInst::ICMP_UGE)
2734 return getTrue(ITy);
2741 // Handle (CI2 << X) == CI and (CI2 << X) != CI, where CI2 is a power of 2 and CI isn't.
2742 if (auto *CI = dyn_cast<ConstantInt>(RHS)) {
2743 const APInt *CI2Val, *CIVal = &CI->getValue();
2744 if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) &&
2745 CI2Val->isPowerOf2()) {
2746 if (!CIVal->isPowerOf2()) {
2747 // CI2 << X can equal zero in some circumstances,
2748 // this simplification is unsafe if CI is zero.
2750 // We know it is safe if:
2751 // - The shift is nsw, we can't shift out the one bit.
2752 // - The shift is nuw, we can't shift out the one bit.
2755 if (LBO->hasNoSignedWrap() || LBO->hasNoUnsignedWrap() ||
2756 *CI2Val == 1 || !CI->isZero()) {
2757 if (Pred == ICmpInst::ICMP_EQ)
2758 return ConstantInt::getFalse(RHS->getContext());
2759 if (Pred == ICmpInst::ICMP_NE)
2760 return ConstantInt::getTrue(RHS->getContext());
2763 if (CIVal->isSignBit() && *CI2Val == 1) {
2764 if (Pred == ICmpInst::ICMP_UGT)
2765 return ConstantInt::getFalse(RHS->getContext());
2766 if (Pred == ICmpInst::ICMP_ULE)
2767 return ConstantInt::getTrue(RHS->getContext());
2772 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
2773 LBO->getOperand(1) == RBO->getOperand(1)) {
2774 switch (LBO->getOpcode()) {
2777 case Instruction::UDiv:
2778 case Instruction::LShr:
2779 if (ICmpInst::isSigned(Pred))
2782 case Instruction::SDiv:
2783 case Instruction::AShr:
2784 if (!LBO->isExact() || !RBO->isExact())
2786 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2787 RBO->getOperand(0), Q, MaxRecurse - 1))
2790 case Instruction::Shl: {
2791 bool NUW = LBO->hasNoUnsignedWrap() && RBO->hasNoUnsignedWrap();
2792 bool NSW = LBO->hasNoSignedWrap() && RBO->hasNoSignedWrap();
2795 if (!NSW && ICmpInst::isSigned(Pred))
2797 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2798 RBO->getOperand(0), Q, MaxRecurse - 1))
2807 /// Simplify integer comparisons where at least one operand of the compare
2808 /// matches an integer min/max idiom.
2809 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
2810 Value *RHS, const Query &Q,
2811 unsigned MaxRecurse) {
2812 Type *ITy = GetCompareTy(LHS); // The return type.
2814 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
2815 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
2817 // Signed variants on "max(a,b)>=a -> true".
2818 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
2820 std::swap(A, B); // smax(A, B) pred A.
2821 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
2822 // We analyze this as smax(A, B) pred A.
2824 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
2825 (A == LHS || B == LHS)) {
2827 std::swap(A, B); // A pred smax(A, B).
2828 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
2829 // We analyze this as smax(A, B) swapped-pred A.
2830 P = CmpInst::getSwappedPredicate(Pred);
2831 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
2832 (A == RHS || B == RHS)) {
2834 std::swap(A, B); // smin(A, B) pred A.
2835 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
2836 // We analyze this as smax(-A, -B) swapped-pred -A.
2837 // Note that we do not need to actually form -A or -B thanks to EqP.
2838 P = CmpInst::getSwappedPredicate(Pred);
2839 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
2840 (A == LHS || B == LHS)) {
2842 std::swap(A, B); // A pred smin(A, B).
2843 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
2844 // We analyze this as smax(-A, -B) pred -A.
2845 // Note that we do not need to actually form -A or -B thanks to EqP.
2848 if (P != CmpInst::BAD_ICMP_PREDICATE) {
2849 // Cases correspond to "max(A, B) p A".
2853 case CmpInst::ICMP_EQ:
2854 case CmpInst::ICMP_SLE:
2855 // Equivalent to "A EqP B". This may be the same as the condition tested
2856 // in the max/min; if so, we can just return that.
2857 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
2859 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
2861 // Otherwise, see if "A EqP B" simplifies.
2863 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
2866 case CmpInst::ICMP_NE:
2867 case CmpInst::ICMP_SGT: {
2868 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
2869 // Equivalent to "A InvEqP B". This may be the same as the condition
2870 // tested in the max/min; if so, we can just return that.
2871 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
2873 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
2875 // Otherwise, see if "A InvEqP B" simplifies.
2877 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
2881 case CmpInst::ICMP_SGE:
2883 return getTrue(ITy);
2884 case CmpInst::ICMP_SLT:
2886 return getFalse(ITy);
2890 // Unsigned variants on "max(a,b)>=a -> true".
2891 P = CmpInst::BAD_ICMP_PREDICATE;
2892 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
2894 std::swap(A, B); // umax(A, B) pred A.
2895 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
2896 // We analyze this as umax(A, B) pred A.
2898 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
2899 (A == LHS || B == LHS)) {
2901 std::swap(A, B); // A pred umax(A, B).
2902 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
2903 // We analyze this as umax(A, B) swapped-pred A.
2904 P = CmpInst::getSwappedPredicate(Pred);
2905 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
2906 (A == RHS || B == RHS)) {
2908 std::swap(A, B); // umin(A, B) pred A.
2909 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
2910 // We analyze this as umax(-A, -B) swapped-pred -A.
2911 // Note that we do not need to actually form -A or -B thanks to EqP.
2912 P = CmpInst::getSwappedPredicate(Pred);
2913 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
2914 (A == LHS || B == LHS)) {
2916 std::swap(A, B); // A pred umin(A, B).
2917 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
2918 // We analyze this as umax(-A, -B) pred -A.
2919 // Note that we do not need to actually form -A or -B thanks to EqP.
2922 if (P != CmpInst::BAD_ICMP_PREDICATE) {
2923 // Cases correspond to "max(A, B) p A".
2927 case CmpInst::ICMP_EQ:
2928 case CmpInst::ICMP_ULE:
2929 // Equivalent to "A EqP B". This may be the same as the condition tested
2930 // in the max/min; if so, we can just return that.
2931 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
2933 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
2935 // Otherwise, see if "A EqP B" simplifies.
2937 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
2940 case CmpInst::ICMP_NE:
2941 case CmpInst::ICMP_UGT: {
2942 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
2943 // Equivalent to "A InvEqP B". This may be the same as the condition
2944 // tested in the max/min; if so, we can just return that.
2945 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
2947 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
2949 // Otherwise, see if "A InvEqP B" simplifies.
2951 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
2955 case CmpInst::ICMP_UGE:
2957 return getTrue(ITy);
2958 case CmpInst::ICMP_ULT:
2960 return getFalse(ITy);
2964 // Variants on "max(x,y) >= min(x,z)".
2966 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
2967 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
2968 (A == C || A == D || B == C || B == D)) {
2969 // max(x, ?) pred min(x, ?).
2970 if (Pred == CmpInst::ICMP_SGE)
2972 return getTrue(ITy);
2973 if (Pred == CmpInst::ICMP_SLT)
2975 return getFalse(ITy);
2976 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
2977 match(RHS, m_SMax(m_Value(C), m_Value(D))) &&
2978 (A == C || A == D || B == C || B == D)) {
2979 // min(x, ?) pred max(x, ?).
2980 if (Pred == CmpInst::ICMP_SLE)
2982 return getTrue(ITy);
2983 if (Pred == CmpInst::ICMP_SGT)
2985 return getFalse(ITy);
2986 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
2987 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
2988 (A == C || A == D || B == C || B == D)) {
2989 // max(x, ?) pred min(x, ?).
2990 if (Pred == CmpInst::ICMP_UGE)
2992 return getTrue(ITy);
2993 if (Pred == CmpInst::ICMP_ULT)
2995 return getFalse(ITy);
2996 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
2997 match(RHS, m_UMax(m_Value(C), m_Value(D))) &&
2998 (A == C || A == D || B == C || B == D)) {
2999 // min(x, ?) pred max(x, ?).
3000 if (Pred == CmpInst::ICMP_ULE)
3002 return getTrue(ITy);
3003 if (Pred == CmpInst::ICMP_UGT)
3005 return getFalse(ITy);
3011 /// Given operands for an ICmpInst, see if we can fold the result.
3012 /// If not, this returns null.
3013 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3014 const Query &Q, unsigned MaxRecurse) {
3015 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3016 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3018 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3019 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3020 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3022 // If we have a constant, make sure it is on the RHS.
3023 std::swap(LHS, RHS);
3024 Pred = CmpInst::getSwappedPredicate(Pred);
3027 Type *ITy = GetCompareTy(LHS); // The return type.
3029 // icmp X, X -> true/false
3030 // X icmp undef -> true/false. For example, icmp ugt %X, undef -> false
3031 // because X could be 0.
3032 if (LHS == RHS || isa<UndefValue>(RHS))
3033 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3035 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3038 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3041 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS))
3044 // If both operands have range metadata, use the metadata
3045 // to simplify the comparison.
3046 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3047 auto RHS_Instr = dyn_cast<Instruction>(RHS);
3048 auto LHS_Instr = dyn_cast<Instruction>(LHS);
3050 if (RHS_Instr->getMetadata(LLVMContext::MD_range) &&
3051 LHS_Instr->getMetadata(LLVMContext::MD_range)) {
3052 auto RHS_CR = getConstantRangeFromMetadata(
3053 *RHS_Instr->getMetadata(LLVMContext::MD_range));
3054 auto LHS_CR = getConstantRangeFromMetadata(
3055 *LHS_Instr->getMetadata(LLVMContext::MD_range));
3057 auto Satisfied_CR = ConstantRange::makeSatisfyingICmpRegion(Pred, RHS_CR);
3058 if (Satisfied_CR.contains(LHS_CR))
3059 return ConstantInt::getTrue(RHS->getContext());
3061 auto InversedSatisfied_CR = ConstantRange::makeSatisfyingICmpRegion(
3062 CmpInst::getInversePredicate(Pred), RHS_CR);
3063 if (InversedSatisfied_CR.contains(LHS_CR))
3064 return ConstantInt::getFalse(RHS->getContext());
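// For example (illustrative), if %a carries !range !{i32 0, i32 8} and %b
// carries !range !{i32 16, i32 32}, then 'icmp ult i32 %a, %b' folds to true
// because every value in [0, 8) is less than every value in [16, 32).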
3068 // Compare of cast, for example (zext X) != 0 -> X != 0
3069 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3070 Instruction *LI = cast<CastInst>(LHS);
3071 Value *SrcOp = LI->getOperand(0);
3072 Type *SrcTy = SrcOp->getType();
3073 Type *DstTy = LI->getType();
3075 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3076 // if the integer type is the same size as the pointer type.
3077 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3078 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3079 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3080 // Transfer the cast to the constant.
3081 if (Value *V = SimplifyICmpInst(Pred, SrcOp,
3082 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3085 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3086 if (RI->getOperand(0)->getType() == SrcTy)
3087 // Compare without the cast.
3088 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3094 if (isa<ZExtInst>(LHS)) {
3095 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the same type.
3097 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3098 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3099 // Compare X and Y. Note that signed predicates become unsigned.
3100 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3101 SrcOp, RI->getOperand(0), Q,
3105 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3106 // too. If not, then try to deduce the result of the comparison.
3107 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3108 // Compute the constant that would result if we truncated CI to SrcTy and
3109 // then re-extended it to DstTy.
3110 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3111 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3113 // If the re-extended constant didn't change then this is effectively
3114 // also a case of comparing two zero-extended values.
3115 if (RExt == CI && MaxRecurse)
3116 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3117 SrcOp, Trunc, Q, MaxRecurse-1))
3120 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3121 // there. Use this to work out the result of the comparison.
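// For example (illustrative IR), 256 does not survive a round trip through
// i8, and a zero-extended i8 is always unsigned-less than 256:
//   %z = zext i8 %x to i32
//   %c = icmp ult i32 %z, 256   ; folds to true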
3124 default: llvm_unreachable("Unknown ICmp predicate!");
3126 case ICmpInst::ICMP_EQ:
3127 case ICmpInst::ICMP_UGT:
3128 case ICmpInst::ICMP_UGE:
3129 return ConstantInt::getFalse(CI->getContext());
3131 case ICmpInst::ICMP_NE:
3132 case ICmpInst::ICMP_ULT:
3133 case ICmpInst::ICMP_ULE:
3134 return ConstantInt::getTrue(CI->getContext());
3136 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3137 // is non-negative then LHS <s RHS.
3138 case ICmpInst::ICMP_SGT:
3139 case ICmpInst::ICMP_SGE:
3140 return CI->getValue().isNegative() ?
3141 ConstantInt::getTrue(CI->getContext()) :
3142 ConstantInt::getFalse(CI->getContext());
3144 case ICmpInst::ICMP_SLT:
3145 case ICmpInst::ICMP_SLE:
3146 return CI->getValue().isNegative() ?
3147 ConstantInt::getFalse(CI->getContext()) :
3148 ConstantInt::getTrue(CI->getContext());
3154 if (isa<SExtInst>(LHS)) {
3155 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the same type.
3157 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3158 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3159 // Compare X and Y. Note that the predicate does not change.
3160 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3164 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3165 // too. If not, then try to deduce the result of the comparison.
3166 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3167 // Compute the constant that would result if we truncated CI to SrcTy and
3168 // then re-extended it to DstTy.
3169 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3170 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3172 // If the re-extended constant didn't change then this is effectively
3173 // also a case of comparing two sign-extended values.
3174 if (RExt == CI && MaxRecurse)
3175 if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
3178 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3179 // bits there. Use this to work out the result of the comparison.
3182 default: llvm_unreachable("Unknown ICmp predicate!");
3183 case ICmpInst::ICMP_EQ:
3184 return ConstantInt::getFalse(CI->getContext());
3185 case ICmpInst::ICMP_NE:
3186 return ConstantInt::getTrue(CI->getContext());
3188 // If RHS is non-negative then LHS <s RHS. If RHS is negative then LHS >s RHS.
3190 case ICmpInst::ICMP_SGT:
3191 case ICmpInst::ICMP_SGE:
3192 return CI->getValue().isNegative() ?
3193 ConstantInt::getTrue(CI->getContext()) :
3194 ConstantInt::getFalse(CI->getContext());
3195 case ICmpInst::ICMP_SLT:
3196 case ICmpInst::ICMP_SLE:
3197 return CI->getValue().isNegative() ?
3198 ConstantInt::getFalse(CI->getContext()) :
3199 ConstantInt::getTrue(CI->getContext());
3201 // If LHS is non-negative then LHS <u RHS. If LHS is negative then LHS >u RHS.
3203 case ICmpInst::ICMP_UGT:
3204 case ICmpInst::ICMP_UGE:
3205 // Comparison is true iff the LHS <s 0.
3207 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3208 Constant::getNullValue(SrcTy),
3212 case ICmpInst::ICMP_ULT:
3213 case ICmpInst::ICMP_ULE:
3214 // Comparison is true iff the LHS >=s 0.
3216 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3217 Constant::getNullValue(SrcTy),
3227 // icmp eq|ne X, Y -> false|true if X != Y
3228 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
3229 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT)) {
3230 LLVMContext &Ctx = LHS->getType()->getContext();
3231 return Pred == ICmpInst::ICMP_NE ?
3232 ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
3235 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3238 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3241 // Simplify comparisons of related pointers using a powerful, recursive
3242 // GEP-walk when we have target data available.
3243 if (LHS->getType()->isPointerTy())
3244 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI, LHS, RHS))
3246 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3247 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3248 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
3249 Q.DL.getTypeSizeInBits(CLHS->getType()) &&
3250 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
3251 Q.DL.getTypeSizeInBits(CRHS->getType()))
3252 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI,
3253 CLHS->getPointerOperand(),
3254 CRHS->getPointerOperand()))
3257 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
3258 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
3259 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
3260 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
3261 (ICmpInst::isEquality(Pred) ||
3262 (GLHS->isInBounds() && GRHS->isInBounds() &&
3263 Pred == ICmpInst::getSignedPredicate(Pred)))) {
3264 // The bases are equal and the indices are constant. Build a constant
3265 // expression GEP with the same indices and a null base pointer to see
3266 // what constant folding can make out of it.
3267 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
3268 SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
3269 Constant *NewLHS = ConstantExpr::getGetElementPtr(
3270 GLHS->getSourceElementType(), Null, IndicesLHS);
3272 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
3273 Constant *NewRHS = ConstantExpr::getGetElementPtr(
3274 GLHS->getSourceElementType(), Null, IndicesRHS);
3275 return ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
3280 // If a bit is known to be zero for A and known to be one for B,
3281 // then A and B cannot be equal.
3282 if (ICmpInst::isEquality(Pred)) {
3283 const APInt *RHSVal;
3284 if (match(RHS, m_APInt(RHSVal))) {
3285 unsigned BitWidth = RHSVal->getBitWidth();
3286 APInt LHSKnownZero(BitWidth, 0);
3287 APInt LHSKnownOne(BitWidth, 0);
3288 computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, Q.DL, /*Depth=*/0, Q.AC,
3290 if (((LHSKnownZero & *RHSVal) != 0) || ((LHSKnownOne & ~(*RHSVal)) != 0))
3291 return Pred == ICmpInst::ICMP_EQ ? ConstantInt::getFalse(ITy)
3292 : ConstantInt::getTrue(ITy);
3296 // If the comparison is with the result of a select instruction, check whether
3297 // comparing with either branch of the select always yields the same value.
3298 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3299 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3302 // If the comparison is with the result of a phi instruction, check whether
3303 // doing the compare with each incoming phi value yields a common result.
3304 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3305 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3311 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3312 const DataLayout &DL,
3313 const TargetLibraryInfo *TLI,
3314 const DominatorTree *DT, AssumptionCache *AC,
3315 const Instruction *CxtI) {
3316 return ::SimplifyICmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
3320 /// Given operands for an FCmpInst, see if we can fold the result.
3321 /// If not, this returns null.
3322 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3323 FastMathFlags FMF, const Query &Q,
3324 unsigned MaxRecurse) {
3325 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3326 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
3328 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3329 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3330 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3332 // If we have a constant, make sure it is on the RHS.
3333 std::swap(LHS, RHS);
3334 Pred = CmpInst::getSwappedPredicate(Pred);
3337 // Fold trivial predicates.
3338 Type *RetTy = GetCompareTy(LHS);
3339 if (Pred == FCmpInst::FCMP_FALSE)
3340 return getFalse(RetTy);
3341 if (Pred == FCmpInst::FCMP_TRUE)
3342 return getTrue(RetTy);
3344 // UNO/ORD predicates can be trivially folded if NaNs are ignored.
3346 if (Pred == FCmpInst::FCMP_UNO)
3347 return getFalse(RetTy);
3348 if (Pred == FCmpInst::FCMP_ORD)
3349 return getTrue(RetTy);
3352 // fcmp pred x, undef and fcmp pred undef, x
3353 // fold to true if unordered, false if ordered
3354 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) {
3355 // Choosing NaN for the undef will always make unordered comparison succeed
3356 // and ordered comparison fail.
3357 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3360 // fcmp x,x -> true/false. Not all compares are foldable.
3362 if (CmpInst::isTrueWhenEqual(Pred))
3363 return getTrue(RetTy);
3364 if (CmpInst::isFalseWhenEqual(Pred))
3365 return getFalse(RetTy);
3368 // Handle fcmp with constant RHS
3369 const ConstantFP *CFP = nullptr;
3370 if (const auto *RHSC = dyn_cast<Constant>(RHS)) {
3371 if (RHS->getType()->isVectorTy())
3372 CFP = dyn_cast_or_null<ConstantFP>(RHSC->getSplatValue());
3374 CFP = dyn_cast<ConstantFP>(RHSC);
3377 // If the constant is a NaN, see if we can fold the comparison based on it.
3378 if (CFP->getValueAPF().isNaN()) {
3379 if (FCmpInst::isOrdered(Pred)) // True "if ordered and foo"
3380 return getFalse(RetTy);
3381 assert(FCmpInst::isUnordered(Pred) &&
3382 "Comparison must be either ordered or unordered!");
3383 // True if unordered.
3384 return getTrue(RetTy);
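// For example (illustrative IR), with a NaN constant on the RHS:
//   %o = fcmp olt double %x, 0x7FF8000000000000   ; ordered, folds to false
//   %u = fcmp ult double %x, 0x7FF8000000000000   ; unordered, folds to true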
3386 // Check whether the constant is an infinity.
3387 if (CFP->getValueAPF().isInfinity()) {
3388 if (CFP->getValueAPF().isNegative()) {
3390 case FCmpInst::FCMP_OLT:
3391 // No value is ordered and less than negative infinity.
3392 return getFalse(RetTy);
3393 case FCmpInst::FCMP_UGE:
3394 // All values are unordered with, or at least, negative infinity.
3395 return getTrue(RetTy);
3401 case FCmpInst::FCMP_OGT:
3402 // No value is ordered and greater than infinity.
3403 return getFalse(RetTy);
3404 case FCmpInst::FCMP_ULE:
3405 // All values are unordered with, or at most, infinity.
3406 return getTrue(RetTy);
3412 if (CFP->getValueAPF().isZero()) {
3414 case FCmpInst::FCMP_UGE:
3415 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3416 return getTrue(RetTy);
3418 case FCmpInst::FCMP_OLT:
3420 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3421 return getFalse(RetTy);
3429 // If the comparison is with the result of a select instruction, check whether
3430 // comparing with either branch of the select always yields the same value.
3431 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3432 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3435 // If the comparison is with the result of a phi instruction, check whether
3436 // doing the compare with each incoming phi value yields a common result.
3437 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3438 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3444 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3445 FastMathFlags FMF, const DataLayout &DL,
3446 const TargetLibraryInfo *TLI,
3447 const DominatorTree *DT, AssumptionCache *AC,
3448 const Instruction *CxtI) {
3449 return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF,
3450 Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
3451 }
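// Illustrative usage (added commentary; the surrounding variables are
// hypothetical): a pass that already has analysis results can query the
// wrapper directly, e.g.
//   if (Value *V = SimplifyFCmpInst(FC->getPredicate(), FC->getOperand(0),
//                                   FC->getOperand(1), FC->getFastMathFlags(),
//                                   DL, &TLI, &DT, &AC, FC))
//     FC->replaceAllUsesWith(V);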
3453 /// See if V simplifies when its operand Op is replaced with RepOp.
3454 static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
3455 const Query &Q,
3456 unsigned MaxRecurse) {
3457 // Trivial replacement.
3458 if (V == Op)
3459 return RepOp;
3461 auto *I = dyn_cast<Instruction>(V);
3462 if (!I)
3463 return nullptr;
3465 // If this is a binary operator, try to simplify it with the replaced op.
3466 if (auto *B = dyn_cast<BinaryOperator>(I)) {
3468 // %cmp = icmp eq i32 %x, 2147483647
3469 // %add = add nsw i32 %x, 1
3470 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
3472 // We can't replace %sel with %add unless we strip away the flags.
3473 if (isa<OverflowingBinaryOperator>(B))
3474 if (B->hasNoSignedWrap() || B->hasNoUnsignedWrap())
3475 return nullptr;
3476 if (isa<PossiblyExactOperator>(B))
3477 if (B->isExact())
3478 return nullptr;
3480 if (MaxRecurse) {
3481 if (B->getOperand(0) == Op)
3482 return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q,
3483 MaxRecurse - 1);
3484 if (B->getOperand(1) == Op)
3485 return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q,
3486 MaxRecurse - 1);
3487 }
3488 }
3490 // Same for CmpInsts.
3491 if (CmpInst *C = dyn_cast<CmpInst>(I)) {
3492 if (MaxRecurse) {
3493 if (C->getOperand(0) == Op)
3494 return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q,
3495 MaxRecurse - 1);
3496 if (C->getOperand(1) == Op)
3497 return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q,
3498 MaxRecurse - 1);
3499 }
3500 }
3502 // TODO: We could hand off more cases to instsimplify here.
3504 // If all operands are constant after substituting Op for RepOp then we can
3505 // constant fold the instruction.
3506 if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) {
3507 // Build a list of all constant operands.
3508 SmallVector<Constant *, 8> ConstOps;
3509 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3510 if (I->getOperand(i) == Op)
3511 ConstOps.push_back(CRepOp);
3512 else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i)))
3513 ConstOps.push_back(COp);
3514 else
3515 break;
3516 }
3518 // All operands were constants, fold it.
3519 if (ConstOps.size() == I->getNumOperands()) {
3520 if (CmpInst *C = dyn_cast<CmpInst>(I))
3521 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
3522 ConstOps[1], Q.DL, Q.TLI);
3524 if (LoadInst *LI = dyn_cast<LoadInst>(I))
3525 if (!LI->isVolatile())
3526 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL);
3528 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
3529 }
3530 }
3532 return nullptr;
3533 }
3535 /// Try to simplify a select instruction when its condition operand is an
3536 /// integer comparison where one operand of the compare is a constant.
3537 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
3538 const APInt *Y, bool TrueWhenUnset) {
3539 const APInt *C;
3541 // (X & Y) == 0 ? X & ~Y : X --> X
3542 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
3543 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
3544 *Y == ~*C)
3545 return TrueWhenUnset ? FalseVal : TrueVal;
3547 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
3548 // (X & Y) != 0 ? X : X & ~Y --> X
3549 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
3550 *Y == ~*C)
3551 return TrueWhenUnset ? FalseVal : TrueVal;
3553 if (Y->isPowerOf2()) {
3554 // (X & Y) == 0 ? X | Y : X --> X | Y
3555 // (X & Y) != 0 ? X | Y : X --> X
3556 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
3557 *Y == *C)
3558 return TrueWhenUnset ? TrueVal : FalseVal;
3560 // (X & Y) == 0 ? X : X | Y --> X
3561 // (X & Y) != 0 ? X : X | Y --> X | Y
3562 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
3563 *Y == *C)
3564 return TrueWhenUnset ? TrueVal : FalseVal;
3565 }
3567 return nullptr;
3568 }
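// Illustrative example (added commentary): for Y = 8 (a power of two),
//   %and = and i32 %x, 8
//   %cmp = icmp eq i32 %and, 0
//   %or  = or  i32 %x, 8
//   %sel = select i1 %cmp, i32 %or, i32 %x
// both arms produce %x with bit 3 set, so the select folds to %or.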
3570 /// An alternative way to test if a bit is set or not uses sgt/slt instead of
3571 /// eq/ne.
3572 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *TrueVal,
3573 Value *FalseVal,
3574 bool TrueWhenUnset) {
3575 unsigned BitWidth = TrueVal->getType()->getScalarSizeInBits();
3576 if (!BitWidth)
3577 return nullptr;
3579 APInt MinSignedValue;
3580 Value *X;
3581 if (match(CmpLHS, m_Trunc(m_Value(X))) && (X == TrueVal || X == FalseVal)) {
3582 // icmp slt (trunc X), 0 <--> icmp ne (and X, C), 0
3583 // icmp sgt (trunc X), -1 <--> icmp eq (and X, C), 0
3584 unsigned DestSize = CmpLHS->getType()->getScalarSizeInBits();
3585 MinSignedValue = APInt::getSignedMinValue(DestSize).zext(BitWidth);
3586 } else {
3587 // icmp slt X, 0 <--> icmp ne (and X, C), 0
3588 // icmp sgt X, -1 <--> icmp eq (and X, C), 0
3589 X = CmpLHS;
3590 MinSignedValue = APInt::getSignedMinValue(BitWidth);
3591 }
3593 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, &MinSignedValue,
3594 TrueWhenUnset))
3595 return V;
3597 return nullptr;
3598 }
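// Illustrative example (added commentary): 'icmp slt i32 %x, 0' is simply a
// test of the sign bit, so
//   %cmp = icmp slt i32 %x, 0
//   %and = and i32 %x, 2147483647
//   %sel = select i1 %cmp, i32 %and, i32 %x
// is recognised as the bit test "(x & signbit) != 0 ? x & ~signbit : x" and
// folds to %and.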
3600 /// Try to simplify a select instruction when its condition operand is an
3601 /// integer comparison.
3602 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
3603 Value *FalseVal, const Query &Q,
3604 unsigned MaxRecurse) {
3605 ICmpInst::Predicate Pred;
3606 Value *CmpLHS, *CmpRHS;
3607 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
3608 return nullptr;
3610 // FIXME: This code is nearly duplicated in InstCombine. Using/refactoring
3611 // decomposeBitTestICmp() might help.
3612 if (ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero())) {
3613 Value *X;
3614 const APInt *Y;
3615 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
3616 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
3617 Pred == ICmpInst::ICMP_EQ))
3618 return V;
3619 } else if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, m_Zero())) {
3620 // Comparing signed-less-than 0 checks if the sign bit is set.
3621 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, TrueVal, FalseVal,
3622 false))
3623 return V;
3624 } else if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, m_AllOnes())) {
3625 // Comparing signed-greater-than -1 checks if the sign bit is not set.
3626 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, TrueVal, FalseVal,
3627 true))
3628 return V;
3629 }
3631 if (CondVal->hasOneUse()) {
3632 const APInt *C;
3633 if (match(CmpRHS, m_APInt(C))) {
3634 // X < MIN ? T : F --> F
3635 if (Pred == ICmpInst::ICMP_SLT && C->isMinSignedValue())
3636 return FalseVal;
3637 // X < MIN ? T : F --> F
3638 if (Pred == ICmpInst::ICMP_ULT && C->isMinValue())
3639 return FalseVal;
3640 // X > MAX ? T : F --> F
3641 if (Pred == ICmpInst::ICMP_SGT && C->isMaxSignedValue())
3642 return FalseVal;
3643 // X > MAX ? T : F --> F
3644 if (Pred == ICmpInst::ICMP_UGT && C->isMaxValue())
3645 return FalseVal;
3646 }
3647 }
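// Illustrative example (added commentary): no i8 value is unsigned-less-than
// 0, so (assuming the compare has no other users, per the guard above)
//   %cmp = icmp ult i8 %x, 0
//   %sel = select i1 %cmp, i8 %t, i8 %f
// always produces %f and the select folds to its false arm.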
3649 // If we have an equality comparison, then we know the value in one of the
3650 // arms of the select. See if substituting this value into the arm and
3651 // simplifying the result yields the same value as the other arm.
3652 if (Pred == ICmpInst::ICMP_EQ) {
3653 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3654 TrueVal ||
3655 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3656 TrueVal)
3657 return FalseVal;
3658 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3659 FalseVal ||
3660 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3661 FalseVal)
3662 return FalseVal;
3663 } else if (Pred == ICmpInst::ICMP_NE) {
3664 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3665 FalseVal ||
3666 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3667 FalseVal)
3668 return TrueVal;
3669 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3670 TrueVal ||
3671 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3672 TrueVal)
3673 return TrueVal;
3674 }
3676 return nullptr;
3677 }
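// Illustrative example (added commentary): in
//   %cmp = icmp eq i32 %x, 0
//   %add = add i32 %x, %y
//   %sel = select i1 %cmp, i32 %y, i32 %add
// substituting 0 for %x in the false arm simplifies %add to %y, the true arm,
// so the select above folds to %add.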
3679 /// Given operands for a SelectInst, see if we can fold the result.
3680 /// If not, this returns null.
3681 static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
3682 Value *FalseVal, const Query &Q,
3683 unsigned MaxRecurse) {
3684 // select true, X, Y -> X
3685 // select false, X, Y -> Y
3686 if (Constant *CB = dyn_cast<Constant>(CondVal)) {
3687 if (CB->isAllOnesValue())
3688 return TrueVal;
3689 if (CB->isNullValue())
3690 return FalseVal;
3691 }
3693 // select C, X, X -> X
3694 if (TrueVal == FalseVal)
3695 return TrueVal;
3697 if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
3698 if (isa<Constant>(TrueVal))
3699 return TrueVal;
3700 return FalseVal;
3701 }
3702 if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X
3703 return FalseVal;
3704 if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
3705 return TrueVal;
3707 if (Value *V =
3708 simplifySelectWithICmpCond(CondVal, TrueVal, FalseVal, Q, MaxRecurse))
3709 return V;
3711 return nullptr;
3712 }
3714 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
3715 const DataLayout &DL,
3716 const TargetLibraryInfo *TLI,
3717 const DominatorTree *DT, AssumptionCache *AC,
3718 const Instruction *CxtI) {
3719 return ::SimplifySelectInst(Cond, TrueVal, FalseVal,
3720 Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
3721 }
3723 /// Given operands for a GetElementPtrInst, see if we can fold the result.
3724 /// If not, this returns null.
3725 static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
3726 const Query &Q, unsigned) {
3727 // The type of the GEP pointer operand.
3728 unsigned AS =
3729 cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
3731 // getelementptr P -> P.
3732 if (Ops.size() == 1)
3733 return Ops[0];
3735 // Compute the (pointer) type returned by the GEP instruction.
3736 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
3737 Type *GEPTy = PointerType::get(LastType, AS);
3738 if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
3739 GEPTy = VectorType::get(GEPTy, VT->getNumElements());
3741 if (isa<UndefValue>(Ops[0]))
3742 return UndefValue::get(GEPTy);
3744 if (Ops.size() == 2) {
3745 // getelementptr P, 0 -> P.
3746 if (match(Ops[1], m_Zero()))
3747 return Ops[0];
3749 Type *Ty = SrcTy;
3750 if (Ty->isSized()) {
3751 Value *P;
3752 uint64_t C;
3753 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
3754 // getelementptr P, N -> P if P points to a type of zero size.
3755 if (TyAllocSize == 0)
3756 return Ops[0];
3758 // The following transforms are only safe if the ptrtoint cast
3759 // doesn't truncate the pointers.
3760 if (Ops[1]->getType()->getScalarSizeInBits() ==
3761 Q.DL.getPointerSizeInBits(AS)) {
3762 auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
3763 if (match(P, m_Zero()))
3764 return Constant::getNullValue(GEPTy);
3765 Value *Temp;
3766 if (match(P, m_PtrToInt(m_Value(Temp))))
3767 if (Temp->getType() == GEPTy)
3768 return Temp;
3769 return nullptr;
3770 };
3772 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
3773 if (TyAllocSize == 1 &&
3774 match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0])))))
3775 if (Value *R = PtrToIntOrZero(P))
3776 return R;
3778 // getelementptr V, (ashr (sub P, V), C) -> Q
3779 // if P points to a type of size 1 << C.
3780 if (match(Ops[1],
3781 m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
3782 m_ConstantInt(C))) &&
3783 TyAllocSize == 1ULL << C)
3784 if (Value *R = PtrToIntOrZero(P))
3785 return R;
3787 // getelementptr V, (sdiv (sub P, V), C) -> Q
3788 // if P points to a type of size C.
3789 if (match(Ops[1],
3790 m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
3791 m_SpecificInt(TyAllocSize))))
3792 if (Value *R = PtrToIntOrZero(P))
3793 return R;
3794 }
3795 }
3796 }
3798 if (Q.DL.getTypeAllocSize(LastType) == 1 &&
3799 all_of(Ops.slice(1).drop_back(1),
3800 [](Value *Idx) { return match(Idx, m_Zero()); })) {
3801 unsigned PtrWidth =
3802 Q.DL.getPointerSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
3803 if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == PtrWidth) {
3804 APInt BasePtrOffset(PtrWidth, 0);
3805 Value *StrippedBasePtr =
3806 Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
3807 BasePtrOffset);
3809 // gep (gep V, C), (sub 0, V) -> C
3810 if (match(Ops.back(),
3811 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr))))) {
3812 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
3813 return ConstantExpr::getIntToPtr(CI, GEPTy);
3815 // gep (gep V, C), (xor V, -1) -> C-1
3816 if (match(Ops.back(),
3817 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes()))) {
3818 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
3819 return ConstantExpr::getIntToPtr(CI, GEPTy);
3820 }
3821 }
3822 }
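// Illustrative example (added commentary, assuming 64-bit pointers): with an
// i8 (size-1) element type,
//   %vi   = ptrtoint i8* %v to i64
//   %pi   = ptrtoint i8* %p to i64
//   %diff = sub i64 %pi, %vi
//   %gep  = getelementptr i8, i8* %v, i64 %diff
// recomputes %p, so the GEP folds to %p by the "getelementptr V, (sub P, V)"
// rule above.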
3824 // Check to see if this is constant foldable.
3825 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3826 if (!isa<Constant>(Ops[i]))
3827 return nullptr;
3829 return ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
3830 Ops.slice(1));
3831 }
3833 Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
3834 const DataLayout &DL,
3835 const TargetLibraryInfo *TLI,
3836 const DominatorTree *DT, AssumptionCache *AC,
3837 const Instruction *CxtI) {
3838 return ::SimplifyGEPInst(SrcTy, Ops,
3839 Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
3840 }
3842 /// Given operands for an InsertValueInst, see if we can fold the result.
3843 /// If not, this returns null.
3844 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
3845 ArrayRef<unsigned> Idxs, const Query &Q,
3846 unsigned) {
3847 if (Constant *CAgg = dyn_cast<Constant>(Agg))
3848 if (Constant *CVal = dyn_cast<Constant>(Val))
3849 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
3851 // insertvalue x, undef, n -> x
3852 if (match(Val, m_Undef()))
3853 return Agg;
3855 // insertvalue x, (extractvalue y, n), n
3856 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
3857 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
3858 EV->getIndices() == Idxs) {
3859 // insertvalue undef, (extractvalue y, n), n -> y
3860 if (match(Agg, m_Undef()))
3861 return EV->getAggregateOperand();
3863 // insertvalue y, (extractvalue y, n), n -> y
3864 if (Agg == EV->getAggregateOperand())
3865 return Agg;
3866 }
3868 return nullptr;
3869 }
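// Illustrative example (added commentary): reinserting a value that was just
// extracted is a no-op, e.g.
//   %e = extractvalue {i32, i32} %agg, 0
//   %r = insertvalue {i32, i32} %agg, i32 %e, 0
// folds to %agg.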
3871 Value *llvm::SimplifyInsertValueInst(
3872 Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, const DataLayout &DL,
3873 const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC,
3874 const Instruction *CxtI) {
3875 return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query(DL, TLI, DT, AC, CxtI),
3876 RecursionLimit);
3877 }
3879 /// Given operands for an ExtractValueInst, see if we can fold the result.
3880 /// If not, this returns null.
3881 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
3882 const Query &, unsigned) {
3883 if (auto *CAgg = dyn_cast<Constant>(Agg))
3884 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
3886 // extractvalue x, (insertvalue y, elt, n), n -> elt
3887 unsigned NumIdxs = Idxs.size();
3888 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
3889 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
3890 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
3891 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
3892 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
3893 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
3894 Idxs.slice(0, NumCommonIdxs)) {
3895 if (NumIdxs == NumInsertValueIdxs)
3896 return IVI->getInsertedValueOperand();
3897 break;
3898 }
3899 }
3901 return nullptr;
3902 }
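// Illustrative example (added commentary): the loop above walks a chain of
// insertvalues, so
//   %a = insertvalue {i32, i32} undef, i32 %x, 0
//   %b = insertvalue {i32, i32} %a, i32 %y, 1
//   %e = extractvalue {i32, i32} %b, 0
// folds to %x.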
3904 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
3905 const DataLayout &DL,
3906 const TargetLibraryInfo *TLI,
3907 const DominatorTree *DT,
3908 AssumptionCache *AC,
3909 const Instruction *CxtI) {
3910 return ::SimplifyExtractValueInst(Agg, Idxs, Query(DL, TLI, DT, AC, CxtI),
3911 RecursionLimit);
3912 }
3914 /// Given operands for an ExtractElementInst, see if we can fold the result.
3915 /// If not, this returns null.
3916 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const Query &,
3917 unsigned) {
3918 if (auto *CVec = dyn_cast<Constant>(Vec)) {
3919 if (auto *CIdx = dyn_cast<Constant>(Idx))
3920 return ConstantFoldExtractElementInstruction(CVec, CIdx);
3922 // The index is not relevant if our vector is a splat.
3923 if (auto *Splat = CVec->getSplatValue())
3924 return Splat;
3926 if (isa<UndefValue>(Vec))
3927 return UndefValue::get(Vec->getType()->getVectorElementType());
3928 }
3930 // If extracting a specified index from the vector, see if we can recursively
3931 // find a previously computed scalar that was inserted into the vector.
3932 if (auto *IdxC = dyn_cast<ConstantInt>(Idx))
3933 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
3934 return Elt;
3936 return nullptr;
3937 }
3939 Value *llvm::SimplifyExtractElementInst(
3940 Value *Vec, Value *Idx, const DataLayout &DL, const TargetLibraryInfo *TLI,
3941 const DominatorTree *DT, AssumptionCache *AC, const Instruction *CxtI) {
3942 return ::SimplifyExtractElementInst(Vec, Idx, Query(DL, TLI, DT, AC, CxtI),
3943 RecursionLimit);
3944 }
3946 /// See if we can fold the given phi. If not, returns null.
3947 static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
3948 // If all of the PHI's incoming values are the same then replace the PHI node
3949 // with the common value.
3950 Value *CommonValue = nullptr;
3951 bool HasUndefInput = false;
3952 for (Value *Incoming : PN->incoming_values()) {
3953 // If the incoming value is the phi node itself, it can safely be skipped.
3954 if (Incoming == PN) continue;
3955 if (isa<UndefValue>(Incoming)) {
3956 // Remember that we saw an undef value, but otherwise ignore them.
3957 HasUndefInput = true;
3958 continue;
3959 }
3960 if (CommonValue && Incoming != CommonValue)
3961 return nullptr; // Not the same, bail out.
3962 CommonValue = Incoming;
3965 // If CommonValue is null then all of the incoming values were either undef or
3966 // equal to the phi node itself.
3967 if (!CommonValue)
3968 return UndefValue::get(PN->getType());
3970 // If we have a PHI node like phi(X, undef, X), where X is defined by some
3971 // instruction, we cannot return X as the result of the PHI node unless it
3972 // dominates the PHI block.
3973 if (HasUndefInput)
3974 return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
3976 return CommonValue;
3977 }
3979 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op,
3980 Type *Ty, const Query &Q, unsigned MaxRecurse) {
3981 if (auto *C = dyn_cast<Constant>(Op))
3982 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
3984 if (auto *CI = dyn_cast<CastInst>(Op)) {
3985 auto *Src = CI->getOperand(0);
3986 Type *SrcTy = Src->getType();
3987 Type *MidTy = CI->getType();
3988 Type *DstTy = Ty;
3989 if (Src->getType() == Ty) {
3990 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
3991 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
3992 Type *SrcIntPtrTy =
3993 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
3994 Type *MidIntPtrTy =
3995 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
3996 Type *DstIntPtrTy =
3997 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
3998 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
3999 SrcIntPtrTy, MidIntPtrTy,
4000 DstIntPtrTy) == Instruction::BitCast)
4001 return Src;
4002 }
4003 }
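// Illustrative example (added commentary): a round-trip cast pair such as
//   %m = ptrtoint i8* %p to i64
//   %r = inttoptr i64 %m to i8*
// is reported as eliminable (the pair acts as a bitcast back to the original
// type), so the second cast simplifies to %p, provided the DataLayout says the
// intermediate integer does not truncate the pointer.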
4006 if (CastOpc == Instruction::BitCast)
4007 if (Op->getType() == Ty)
4008 return Op;
4010 return nullptr;
4011 }
4013 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
4014 const DataLayout &DL,
4015 const TargetLibraryInfo *TLI,
4016 const DominatorTree *DT, AssumptionCache *AC,
4017 const Instruction *CxtI) {
4018 return ::SimplifyCastInst(CastOpc, Op, Ty, Query(DL, TLI, DT, AC, CxtI),
4019 RecursionLimit);
4020 }
4022 //=== Helper functions for higher up the class hierarchy.
4024 /// Given operands for a BinaryOperator, see if we can fold the result.
4025 /// If not, this returns null.
4026 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4027 const Query &Q, unsigned MaxRecurse) {
4028 switch (Opcode) {
4029 case Instruction::Add:
4030 return SimplifyAddInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
4031 Q, MaxRecurse);
4032 case Instruction::FAdd:
4033 return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4035 case Instruction::Sub:
4036 return SimplifySubInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
4037 Q, MaxRecurse);
4038 case Instruction::FSub:
4039 return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4041 case Instruction::Mul: return SimplifyMulInst (LHS, RHS, Q, MaxRecurse);
4042 case Instruction::FMul:
4043 return SimplifyFMulInst (LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4044 case Instruction::SDiv: return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
4045 case Instruction::UDiv: return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
4046 case Instruction::FDiv:
4047 return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4048 case Instruction::SRem: return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
4049 case Instruction::URem: return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
4050 case Instruction::FRem:
4051 return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4052 case Instruction::Shl:
4053 return SimplifyShlInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
4054 Q, MaxRecurse);
4055 case Instruction::LShr:
4056 return SimplifyLShrInst(LHS, RHS, /*isExact*/false, Q, MaxRecurse);
4057 case Instruction::AShr:
4058 return SimplifyAShrInst(LHS, RHS, /*isExact*/false, Q, MaxRecurse);
4059 case Instruction::And: return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
4060 case Instruction::Or: return SimplifyOrInst (LHS, RHS, Q, MaxRecurse);
4061 case Instruction::Xor: return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
4062 default:
4063 if (Constant *CLHS = dyn_cast<Constant>(LHS))
4064 if (Constant *CRHS = dyn_cast<Constant>(RHS))
4065 return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
4067 // If the operation is associative, try some generic simplifications.
4068 if (Instruction::isAssociative(Opcode))
4069 if (Value *V = SimplifyAssociativeBinOp(Opcode, LHS, RHS, Q, MaxRecurse))
4070 return V;
4072 // If the operation is with the result of a select instruction check whether
4073 // operating on either branch of the select always yields the same value.
4074 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4075 if (Value *V = ThreadBinOpOverSelect(Opcode, LHS, RHS, Q, MaxRecurse))
4076 return V;
4078 // If the operation is with the result of a phi instruction, check whether
4079 // operating on all incoming values of the phi always yields the same value.
4080 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4081 if (Value *V = ThreadBinOpOverPHI(Opcode, LHS, RHS, Q, MaxRecurse))
4082 return V;
4084 return nullptr;
4085 }
4086 }
4088 /// Given operands for a BinaryOperator, see if we can fold the result.
4089 /// If not, this returns null.
4090 /// In contrast to SimplifyBinOp, try to use FastMathFlags when folding the
4091 /// result. If FastMathFlags are not needed, simply fall back to SimplifyBinOp.
4092 static Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4093 const FastMathFlags &FMF, const Query &Q,
4094 unsigned MaxRecurse) {
4095 switch (Opcode) {
4096 case Instruction::FAdd:
4097 return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
4098 case Instruction::FSub:
4099 return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
4100 case Instruction::FMul:
4101 return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
4102 case Instruction::FDiv:
4103 return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
4104 default:
4105 return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
4106 }
4107 }
4109 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4110 const DataLayout &DL, const TargetLibraryInfo *TLI,
4111 const DominatorTree *DT, AssumptionCache *AC,
4112 const Instruction *CxtI) {
4113 return ::SimplifyBinOp(Opcode, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
4114 RecursionLimit);
4115 }
4117 Value *llvm::SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4118 const FastMathFlags &FMF, const DataLayout &DL,
4119 const TargetLibraryInfo *TLI,
4120 const DominatorTree *DT, AssumptionCache *AC,
4121 const Instruction *CxtI) {
4122 return ::SimplifyFPBinOp(Opcode, LHS, RHS, FMF, Query(DL, TLI, DT, AC, CxtI),
4123 RecursionLimit);
4124 }
4126 /// Given operands for a CmpInst, see if we can fold the result.
4127 static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4128 const Query &Q, unsigned MaxRecurse) {
4129 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
4130 return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
4131 return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4132 }
4134 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4135 const DataLayout &DL, const TargetLibraryInfo *TLI,
4136 const DominatorTree *DT, AssumptionCache *AC,
4137 const Instruction *CxtI) {
4138 return ::SimplifyCmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
4139 RecursionLimit);
4140 }
4142 static bool IsIdempotent(Intrinsic::ID ID) {
4143 switch (ID) {
4144 default: return false;
4146 // Unary idempotent: f(f(x)) = f(x)
4147 case Intrinsic::fabs:
4148 case Intrinsic::floor:
4149 case Intrinsic::ceil:
4150 case Intrinsic::trunc:
4151 case Intrinsic::rint:
4152 case Intrinsic::nearbyint:
4153 case Intrinsic::round:
4154 return true;
4155 }
4156 }
4158 static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset,
4159 const DataLayout &DL) {
4160 GlobalValue *PtrSym;
4161 APInt PtrOffset;
4162 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
4163 return nullptr;
4165 Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext());
4166 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
4167 Type *Int32PtrTy = Int32Ty->getPointerTo();
4168 Type *Int64Ty = Type::getInt64Ty(Ptr->getContext());
4170 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
4171 if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64)
4172 return nullptr;
4174 uint64_t OffsetInt = OffsetConstInt->getSExtValue();
4175 if (OffsetInt % 4 != 0)
4176 return nullptr;
4178 Constant *C = ConstantExpr::getGetElementPtr(
4179 Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy),
4180 ConstantInt::get(Int64Ty, OffsetInt / 4));
4181 Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL);
4182 if (!Loaded)
4183 return nullptr;
4185 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
4186 if (!LoadedCE)
4187 return nullptr;
4189 if (LoadedCE->getOpcode() == Instruction::Trunc) {
4190 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
4191 if (!LoadedCE)
4192 return nullptr;
4193 }
4195 if (LoadedCE->getOpcode() != Instruction::Sub)
4196 return nullptr;
4198 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
4199 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
4200 return nullptr;
4201 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
4203 Constant *LoadedRHS = LoadedCE->getOperand(1);
4204 GlobalValue *LoadedRHSSym;
4205 APInt LoadedRHSOffset;
4206 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
4207 DL) ||
4208 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
4209 return nullptr;
4211 return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy);
4212 }
4214 static bool maskIsAllZeroOrUndef(Value *Mask) {
4215 auto *ConstMask = dyn_cast<Constant>(Mask);
4216 if (!ConstMask)
4217 return false;
4218 if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
4219 return true;
4220 for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
4221 ++I) {
4222 if (auto *MaskElt = ConstMask->getAggregateElement(I))
4223 if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
4224 continue;
4225 return false;
4226 }
4227 return true;
4228 }
4230 template <typename IterTy>
4231 static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
4232 const Query &Q, unsigned MaxRecurse) {
4233 Intrinsic::ID IID = F->getIntrinsicID();
4234 unsigned NumOperands = std::distance(ArgBegin, ArgEnd);
4235 Type *ReturnType = F->getReturnType();
4238 if (NumOperands == 2) {
4239 Value *LHS = *ArgBegin;
4240 Value *RHS = *(ArgBegin + 1);
4241 if (IID == Intrinsic::usub_with_overflow ||
4242 IID == Intrinsic::ssub_with_overflow) {
4243 // X - X -> { 0, false }
4244 if (LHS == RHS)
4245 return Constant::getNullValue(ReturnType);
4247 // X - undef -> undef
4248 // undef - X -> undef
4249 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
4250 return UndefValue::get(ReturnType);
4251 }
4253 if (IID == Intrinsic::uadd_with_overflow ||
4254 IID == Intrinsic::sadd_with_overflow) {
4255 // X + undef -> undef
4256 if (isa<UndefValue>(RHS))
4257 return UndefValue::get(ReturnType);
4258 }
4260 if (IID == Intrinsic::umul_with_overflow ||
4261 IID == Intrinsic::smul_with_overflow) {
4262 // X * 0 -> { 0, false }
4263 if (match(RHS, m_Zero()))
4264 return Constant::getNullValue(ReturnType);
4266 // X * undef -> { 0, false }
4267 if (match(RHS, m_Undef()))
4268 return Constant::getNullValue(ReturnType);
4269 }
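// Illustrative example (added commentary):
//   %r = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 0)
// folds to the zero aggregate { i32 0, i1 false }: the product is 0 and a
// multiply by zero can never overflow.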
4271 if (IID == Intrinsic::load_relative && isa<Constant>(LHS) &&
4272 isa<Constant>(RHS))
4273 return SimplifyRelativeLoad(cast<Constant>(LHS), cast<Constant>(RHS),
4274 Q.DL);
4275 }
4277 // Simplify calls to llvm.masked.load.*
4278 if (IID == Intrinsic::masked_load) {
4279 Value *MaskArg = ArgBegin[2];
4280 Value *PassthruArg = ArgBegin[3];
4281 // If the mask is all zeros or undef, the "passthru" argument is the result.
4282 if (maskIsAllZeroOrUndef(MaskArg))
4283 return PassthruArg;
4284 }
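// Illustrative example (added commentary): with a constant all-zero mask,
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(
//            <4 x i32>* %p, i32 4, <4 x i1> zeroinitializer, <4 x i32> %pt)
// no lanes are loaded, so the call folds to the passthru value %pt.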
4286 // Perform idempotent optimizations
4287 if (!IsIdempotent(IID))
4288 return nullptr;
4291 if (NumOperands == 1)
4292 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin))
4293 if (II->getIntrinsicID() == IID)
4294 return II;
4296 return nullptr;
4297 }
4299 template <typename IterTy>
4300 static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
4301 const Query &Q, unsigned MaxRecurse) {
4302 Type *Ty = V->getType();
4303 if (PointerType *PTy = dyn_cast<PointerType>(Ty))
4304 Ty = PTy->getElementType();
4305 FunctionType *FTy = cast<FunctionType>(Ty);
4307 // call undef -> undef
4308 // call null -> undef
4309 if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4310 return UndefValue::get(FTy->getReturnType());
4312 Function *F = dyn_cast<Function>(V);
4313 if (!F)
4314 return nullptr;
4316 if (F->isIntrinsic())
4317 if (Value *Ret = SimplifyIntrinsic(F, ArgBegin, ArgEnd, Q, MaxRecurse))
4318 return Ret;
4320 if (!canConstantFoldCallTo(F))
4321 return nullptr;
4323 SmallVector<Constant *, 4> ConstantArgs;
4324 ConstantArgs.reserve(ArgEnd - ArgBegin);
4325 for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) {
4326 Constant *C = dyn_cast<Constant>(*I);
4327 if (!C)
4328 return nullptr;
4329 ConstantArgs.push_back(C);
4330 }
4332 return ConstantFoldCall(F, ConstantArgs, Q.TLI);
4333 }
4335 Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
4336 User::op_iterator ArgEnd, const DataLayout &DL,
4337 const TargetLibraryInfo *TLI, const DominatorTree *DT,
4338 AssumptionCache *AC, const Instruction *CxtI) {
4339 return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(DL, TLI, DT, AC, CxtI),
4340 RecursionLimit);
4341 }
4343 Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
4344 const DataLayout &DL, const TargetLibraryInfo *TLI,
4345 const DominatorTree *DT, AssumptionCache *AC,
4346 const Instruction *CxtI) {
4347 return ::SimplifyCall(V, Args.begin(), Args.end(),
4348 Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
4349 }
4351 /// See if we can compute a simplified version of this instruction.
4352 /// If not, this returns null.
4353 Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout &DL,
4354 const TargetLibraryInfo *TLI,
4355 const DominatorTree *DT, AssumptionCache *AC) {
4356 Value *Result;
4358 switch (I->getOpcode()) {
4359 default:
4360 Result = ConstantFoldInstruction(I, DL, TLI);
4361 break;
4362 case Instruction::FAdd:
4363 Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
4364 I->getFastMathFlags(), DL, TLI, DT, AC, I);
4365 break;
4366 case Instruction::Add:
4367 Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
4368 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4369 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL,
4370 TLI, DT, AC, I);
4371 break;
4372 case Instruction::FSub:
4373 Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
4374 I->getFastMathFlags(), DL, TLI, DT, AC, I);
4375 break;
4376 case Instruction::Sub:
4377 Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
4378 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4379 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL,
4380 TLI, DT, AC, I);
4381 break;
4382 case Instruction::FMul:
4383 Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
4384 I->getFastMathFlags(), DL, TLI, DT, AC, I);
4385 break;
4386 case Instruction::Mul:
4387 Result =
4388 SimplifyMulInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
4389 break;
4390 case Instruction::SDiv:
4391 Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
4392 AC, I);
4393 break;
4394 case Instruction::UDiv:
4395 Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
4396 AC, I);
4397 break;
4398 case Instruction::FDiv:
4399 Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1),
4400 I->getFastMathFlags(), DL, TLI, DT, AC, I);
4401 break;
4402 case Instruction::SRem:
4403 Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
4404 AC, I);
4405 break;
4406 case Instruction::URem:
4407 Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
4408 AC, I);
4409 break;
4410 case Instruction::FRem:
4411 Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1),
4412 I->getFastMathFlags(), DL, TLI, DT, AC, I);
4413 break;
4414 case Instruction::Shl:
4415 Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
4416 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4417 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL,
4418 TLI, DT, AC, I);
4419 break;
4420 case Instruction::LShr:
4421 Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
4422 cast<BinaryOperator>(I)->isExact(), DL, TLI, DT,
4423 AC, I);
4424 break;
4425 case Instruction::AShr:
4426 Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
4427 cast<BinaryOperator>(I)->isExact(), DL, TLI, DT,
4428 AC, I);
4429 break;
4430 case Instruction::And:
4431 Result =
4432 SimplifyAndInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
4433 break;
4434 case Instruction::Or:
4435 Result =
4436 SimplifyOrInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
4437 break;
4438 case Instruction::Xor:
4439 Result =
4440 SimplifyXorInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
4441 break;
4442 case Instruction::ICmp:
4443 Result =
4444 SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), I->getOperand(0),
4445 I->getOperand(1), DL, TLI, DT, AC, I);
4446 break;
4447 case Instruction::FCmp:
4448 Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
4449 I->getOperand(0), I->getOperand(1),
4450 I->getFastMathFlags(), DL, TLI, DT, AC, I);
4451 break;
4452 case Instruction::Select:
4453 Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
4454 I->getOperand(2), DL, TLI, DT, AC, I);
4455 break;
4456 case Instruction::GetElementPtr: {
4457 SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
4458 Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
4459 Ops, DL, TLI, DT, AC, I);
4460 break;
4461 }
4462 case Instruction::InsertValue: {
4463 InsertValueInst *IV = cast<InsertValueInst>(I);
4464 Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
4465 IV->getInsertedValueOperand(),
4466 IV->getIndices(), DL, TLI, DT, AC, I);
4467 break;
4468 }
4469 case Instruction::ExtractValue: {
4470 auto *EVI = cast<ExtractValueInst>(I);
4471 Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
4472 EVI->getIndices(), DL, TLI, DT, AC, I);
4473 break;
4474 }
4475 case Instruction::ExtractElement: {
4476 auto *EEI = cast<ExtractElementInst>(I);
4477 Result = SimplifyExtractElementInst(
4478 EEI->getVectorOperand(), EEI->getIndexOperand(), DL, TLI, DT, AC, I);
4479 break;
4480 }
4481 case Instruction::PHI:
4482 Result = SimplifyPHINode(cast<PHINode>(I), Query(DL, TLI, DT, AC, I));
4483 break;
4484 case Instruction::Call: {
4485 CallSite CS(cast<CallInst>(I));
4486 Result = SimplifyCall(CS.getCalledValue(), CS.arg_begin(), CS.arg_end(), DL,
4487 TLI, DT, AC, I);
4488 break;
4489 }
4490 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
4491 #include "llvm/IR/Instruction.def"
4492 #undef HANDLE_CAST_INST
4493 Result = SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(),
4494 DL, TLI, DT, AC, I);
4495 break;
4496 }
4498 // In general, it is possible for computeKnownBits to determine all bits in a
4499 // value even when the operands are not all constants.
4500 if (!Result && I->getType()->isIntOrIntVectorTy()) {
4501 unsigned BitWidth = I->getType()->getScalarSizeInBits();
4502 APInt KnownZero(BitWidth, 0);
4503 APInt KnownOne(BitWidth, 0);
4504 computeKnownBits(I, KnownZero, KnownOne, DL, /*Depth*/0, AC, I, DT);
4505 if ((KnownZero | KnownOne).isAllOnesValue())
4506 Result = ConstantInt::get(I->getType(), KnownOne);
4507 }
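// Illustrative example (added commentary): in
//   %lo = or i32 %x, 7
//   %r  = and i32 %lo, 7
// every bit of %r is known (the low three bits are one, the rest are zero), so
// this fallback can replace %r with the constant 7 even though %x itself is
// unknown, assuming no earlier fold already caught the pattern.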
4509 /// If called on unreachable code, the above logic may report that the
4510 /// instruction simplified to itself. Make life easier for users by
4511 /// detecting that case here, returning a safe value instead.
4512 return Result == I ? UndefValue::get(I->getType()) : Result;
4513 }
4515 /// \brief Implementation of recursive simplification through an instruction's
4516 /// uses.
4517 ///
4518 /// This is the common implementation of the recursive simplification routines.
4519 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
4520 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
4521 /// instructions to process and attempt to simplify it using
4522 /// InstructionSimplify.
4524 /// This routine returns 'true' only when *it* simplifies something. The passed
4525 /// in simplified value does not count toward this.
4526 static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
4527 const TargetLibraryInfo *TLI,
4528 const DominatorTree *DT,
4529 AssumptionCache *AC) {
4530 bool Simplified = false;
4531 SmallSetVector<Instruction *, 8> Worklist;
4532 const DataLayout &DL = I->getModule()->getDataLayout();
4534 // If we have an explicit value to collapse to, do that round of the
4535 // simplification loop by hand initially.
4536 if (SimpleV) {
4537 for (User *U : I->users())
4538 if (U != I)
4539 Worklist.insert(cast<Instruction>(U));
4541 // Replace the instruction with its simplified value.
4542 I->replaceAllUsesWith(SimpleV);
4544 // Gracefully handle edge cases where the instruction is not wired into any
4545 // parent block.
4546 if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
4547 !I->mayHaveSideEffects())
4548 I->eraseFromParent();
4549 } else {
4550 Worklist.insert(I);
4551 }
4553 // Note that we must test the size on each iteration, the worklist can grow.
4554 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
4555 I = Worklist[Idx];
4557 // See if this instruction simplifies.
4558 SimpleV = SimplifyInstruction(I, DL, TLI, DT, AC);
4559 if (!SimpleV)
4560 continue;
4562 Simplified = true;
4564 // Stash away all the uses of the old instruction so we can check them for
4565 // recursive simplifications after a RAUW. This is cheaper than checking all
4566 // uses of To on the recursive step in most cases.
4567 for (User *U : I->users())
4568 Worklist.insert(cast<Instruction>(U));
4570 // Replace the instruction with its simplified value.
4571 I->replaceAllUsesWith(SimpleV);
4573 // Gracefully handle edge cases where the instruction is not wired into any
4574 // parent block.
4575 if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
4576 !I->mayHaveSideEffects())
4577 I->eraseFromParent();
4578 }
4579 return Simplified;
4580 }
4582 bool llvm::recursivelySimplifyInstruction(Instruction *I,
4583 const TargetLibraryInfo *TLI,
4584 const DominatorTree *DT,
4585 AssumptionCache *AC) {
4586 return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC);
4587 }
4589 bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
4590 const TargetLibraryInfo *TLI,
4591 const DominatorTree *DT,
4592 AssumptionCache *AC) {
4593 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
4594 assert(SimpleV && "Must provide a simplified value.");
4595 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC);
4596 }
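// Illustrative usage (added commentary; the surrounding pass and variables are
// hypothetical): a transform that has just proved instruction I computes the
// same value as NewV can hand both to this helper,
//   if (!replaceAndRecursivelySimplify(I, NewV, &TLI, &DT, &AC))
//     ; // nothing beyond the forced replacement was simplified
// and the users of I are then revisited and folded recursively.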