1 //===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements routines for folding instructions into simpler forms
10 // that do not require creating new instructions. This does constant folding
11 // ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
12 // returning a constant ("and i32 %x, 0" -> "0") or an already existing value
13 // ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
14 // simplified: This is usually true and assuming it simplifies the logic (if
15 // they have not been simplified then results are correct but maybe suboptimal).
17 //===----------------------------------------------------------------------===//
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/Analysis/AssumptionCache.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/CmpInstAnalysis.h"
26 #include "llvm/Analysis/ConstantFolding.h"
27 #include "llvm/Analysis/LoopAnalysisManager.h"
28 #include "llvm/Analysis/MemoryBuiltins.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/Analysis/VectorUtils.h"
31 #include "llvm/IR/ConstantRange.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/Dominators.h"
34 #include "llvm/IR/GetElementPtrTypeIterator.h"
35 #include "llvm/IR/GlobalAlias.h"
36 #include "llvm/IR/InstrTypes.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/Operator.h"
39 #include "llvm/IR/PatternMatch.h"
40 #include "llvm/IR/ValueHandle.h"
41 #include "llvm/Support/KnownBits.h"
44 using namespace llvm::PatternMatch;
46 #define DEBUG_TYPE "instsimplify"
48 enum { RecursionLimit = 3 };
50 STATISTIC(NumExpand, "Number of expansions");
51 STATISTIC(NumReassoc, "Number of reassociations");
53 static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned);
54 static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
55 static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
56 const SimplifyQuery &, unsigned);
57 static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
59 static Value *SimplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
60 const SimplifyQuery &, unsigned);
61 static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
63 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
64 const SimplifyQuery &Q, unsigned MaxRecurse);
65 static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
66 static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned);
67 static Value *SimplifyCastInst(unsigned, Value *, Type *,
68 const SimplifyQuery &, unsigned);
69 static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, const SimplifyQuery &,
72 static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
74 BinaryOperator::BinaryOps BinOpCode;
75 if (auto *BO = dyn_cast<BinaryOperator>(Cond))
76 BinOpCode = BO->getOpcode();
80 CmpInst::Predicate ExpectedPred, Pred1, Pred2;
81 if (BinOpCode == BinaryOperator::Or) {
82 ExpectedPred = ICmpInst::ICMP_NE;
83 } else if (BinOpCode == BinaryOperator::And) {
84 ExpectedPred = ICmpInst::ICMP_EQ;
88 // %A = icmp eq %TV, %FV
89 // %B = icmp eq %X, %Y (and one of these is a select operand)
91 // %D = select %C, %TV, %FV
95 // %A = icmp ne %TV, %FV
96 // %B = icmp ne %X, %Y (and one of these is a select operand)
98 // %D = select %C, %TV, %FV
102 if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal),
103 m_Specific(FalseVal)),
104 m_ICmp(Pred2, m_Value(X), m_Value(Y)))) ||
105 Pred1 != Pred2 || Pred1 != ExpectedPred)
108 if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal)
109 return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;
114 /// For a boolean type or a vector of boolean type, return false or a vector
115 /// with every element false.
116 static Constant *getFalse(Type *Ty) {
117 return ConstantInt::getFalse(Ty);
120 /// For a boolean type or a vector of boolean type, return true or a vector
121 /// with every element true.
122 static Constant *getTrue(Type *Ty) {
123 return ConstantInt::getTrue(Ty);
126 /// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
127 static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
129 CmpInst *Cmp = dyn_cast<CmpInst>(V);
132 CmpInst::Predicate CPred = Cmp->getPredicate();
133 Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
134 if (CPred == Pred && CLHS == LHS && CRHS == RHS)
136 return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
140 /// Simplify comparison with true or false branch of select:
141 /// %sel = select i1 %cond, i32 %tv, i32 %fv
142 /// %cmp = icmp sle i32 %sel, %rhs
143 /// Compose new comparison by substituting %sel with either %tv or %fv
144 /// and see if it simplifies.
145 static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS,
146 Value *RHS, Value *Cond,
147 const SimplifyQuery &Q, unsigned MaxRecurse,
148 Constant *TrueOrFalse) {
149 Value *SimplifiedCmp = SimplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
150 if (SimplifiedCmp == Cond) {
151 // %cmp simplified to the select condition (%cond).
153 } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
154 // It didn't simplify. However, if composed comparison is equivalent
155 // to the select condition (%cond) then we can replace it.
158 return SimplifiedCmp;
161 /// Simplify comparison with true branch of select
162 static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS,
163 Value *RHS, Value *Cond,
164 const SimplifyQuery &Q,
165 unsigned MaxRecurse) {
166 return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
167 getTrue(Cond->getType()));
170 /// Simplify comparison with false branch of select
171 static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS,
172 Value *RHS, Value *Cond,
173 const SimplifyQuery &Q,
174 unsigned MaxRecurse) {
175 return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
176 getFalse(Cond->getType()));
179 /// We know comparison with both branches of select can be simplified, but they
180 /// are not equal. This routine handles some logical simplifications.
181 static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
183 const SimplifyQuery &Q,
184 unsigned MaxRecurse) {
185 // If the false value simplified to false, then the result of the compare
186 // is equal to "Cond && TCmp". This also catches the case when the false
187 // value simplified to false and the true value to true, returning "Cond".
188 if (match(FCmp, m_Zero()))
189 if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
191 // If the true value simplified to true, then the result of the compare
192 // is equal to "Cond || FCmp".
193 if (match(TCmp, m_One()))
194 if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
196 // Finally, if the false value simplified to true and the true value to
197 // false, then the result of the compare is equal to "!Cond".
198 if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
199 if (Value *V = SimplifyXorInst(
200 Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
205 /// Does the given value dominate the specified phi node?
206 static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
207 Instruction *I = dyn_cast<Instruction>(V);
209 // Arguments and constants dominate all instructions.
212 // If we are processing instructions (and/or basic blocks) that have not been
213 // fully added to a function, the parent nodes may still be null. Simply
214 // return the conservative answer in these cases.
215 if (!I->getParent() || !P->getParent() || !I->getFunction())
218 // If we have a DominatorTree then do a precise test.
220 return DT->dominates(I, P);
222 // Otherwise, if the instruction is in the entry block and is not an invoke,
223 // then it obviously dominates all phi nodes.
224 if (I->getParent() == &I->getFunction()->getEntryBlock() &&
225 !isa<InvokeInst>(I) && !isa<CallBrInst>(I))
231 /// Simplify "A op (B op' C)" by distributing op over op', turning it into
232 /// "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is
233 /// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS.
234 /// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
235 /// Returns the simplified value, or null if no simplification was performed.
236 static Value *ExpandBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
237 Instruction::BinaryOps OpcodeToExpand,
238 const SimplifyQuery &Q, unsigned MaxRecurse) {
239 // Recursion is always used, so bail out at once if we already hit the limit.
243 // Check whether the expression has the form "(A op' B) op C".
244 if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
245 if (Op0->getOpcode() == OpcodeToExpand) {
246 // It does! Try turning it into "(A op C) op' (B op C)".
247 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
248 // Do "A op C" and "B op C" both simplify?
249 if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse))
250 if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
251 // They do! Return "L op' R" if it simplifies or is already available.
252 // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
253 if ((L == A && R == B) || (Instruction::isCommutative(OpcodeToExpand)
254 && L == B && R == A)) {
258 // Otherwise return "L op' R" if it simplifies.
259 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
266 // Check whether the expression has the form "A op (B op' C)".
267 if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
268 if (Op1->getOpcode() == OpcodeToExpand) {
269 // It does! Try turning it into "(A op B) op' (A op C)".
270 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
271 // Do "A op B" and "A op C" both simplify?
272 if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse))
273 if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) {
274 // They do! Return "L op' R" if it simplifies or is already available.
275 // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
276 if ((L == B && R == C) || (Instruction::isCommutative(OpcodeToExpand)
277 && L == C && R == B)) {
281 // Otherwise return "L op' R" if it simplifies.
282 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
292 /// Generic simplifications for associative binary operations.
293 /// Returns the simpler value, or null if none was found.
294 static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
295 Value *LHS, Value *RHS,
296 const SimplifyQuery &Q,
297 unsigned MaxRecurse) {
298 assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
300 // Recursion is always used, so bail out at once if we already hit the limit.
304 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
305 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
307 // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
308 if (Op0 && Op0->getOpcode() == Opcode) {
309 Value *A = Op0->getOperand(0);
310 Value *B = Op0->getOperand(1);
313 // Does "B op C" simplify?
314 if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
315 // It does! Return "A op V" if it simplifies or is already available.
316 // If V equals B then "A op V" is just the LHS.
317 if (V == B) return LHS;
318 // Otherwise return "A op V" if it simplifies.
319 if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
326 // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
327 if (Op1 && Op1->getOpcode() == Opcode) {
329 Value *B = Op1->getOperand(0);
330 Value *C = Op1->getOperand(1);
332 // Does "A op B" simplify?
333 if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
334 // It does! Return "V op C" if it simplifies or is already available.
335 // If V equals B then "V op C" is just the RHS.
336 if (V == B) return RHS;
337 // Otherwise return "V op C" if it simplifies.
338 if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
345 // The remaining transforms require commutativity as well as associativity.
346 if (!Instruction::isCommutative(Opcode))
349 // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
350 if (Op0 && Op0->getOpcode() == Opcode) {
351 Value *A = Op0->getOperand(0);
352 Value *B = Op0->getOperand(1);
355 // Does "C op A" simplify?
356 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
357 // It does! Return "V op B" if it simplifies or is already available.
358 // If V equals A then "V op B" is just the LHS.
359 if (V == A) return LHS;
360 // Otherwise return "V op B" if it simplifies.
361 if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
368 // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
369 if (Op1 && Op1->getOpcode() == Opcode) {
371 Value *B = Op1->getOperand(0);
372 Value *C = Op1->getOperand(1);
374 // Does "C op A" simplify?
375 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
376 // It does! Return "B op V" if it simplifies or is already available.
377 // If V equals C then "B op V" is just the RHS.
378 if (V == C) return RHS;
379 // Otherwise return "B op V" if it simplifies.
380 if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
390 /// In the case of a binary operation with a select instruction as an operand,
391 /// try to simplify the binop by seeing whether evaluating it on both branches
392 /// of the select results in the same value. Returns the common value if so,
393 /// otherwise returns null.
394 static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
395 Value *RHS, const SimplifyQuery &Q,
396 unsigned MaxRecurse) {
397 // Recursion is always used, so bail out at once if we already hit the limit.
402 if (isa<SelectInst>(LHS)) {
403 SI = cast<SelectInst>(LHS);
405 assert(isa<SelectInst>(RHS) && "No select instruction operand!");
406 SI = cast<SelectInst>(RHS);
409 // Evaluate the BinOp on the true and false branches of the select.
413 TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
414 FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
416 TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
417 FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
420 // If they simplified to the same value, then return the common value.
421 // If they both failed to simplify then return null.
425 // If one branch simplified to undef, return the other one.
426 if (TV && isa<UndefValue>(TV))
428 if (FV && isa<UndefValue>(FV))
431 // If applying the operation did not change the true and false select values,
432 // then the result of the binop is the select itself.
433 if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
436 // If one branch simplified and the other did not, and the simplified
437 // value is equal to the unsimplified one, return the simplified value.
438 // For example, select (cond, X, X & Z) & Z -> X & Z.
439 if ((FV && !TV) || (TV && !FV)) {
440 // Check that the simplified value has the form "X op Y" where "op" is the
441 // same as the original operation.
442 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
443 if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) {
444 // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
445 // We already know that "op" is the same as for the simplified value. See
446 // if the operands match too. If so, return the simplified value.
447 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
448 Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
449 Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
450 if (Simplified->getOperand(0) == UnsimplifiedLHS &&
451 Simplified->getOperand(1) == UnsimplifiedRHS)
453 if (Simplified->isCommutative() &&
454 Simplified->getOperand(1) == UnsimplifiedLHS &&
455 Simplified->getOperand(0) == UnsimplifiedRHS)
463 /// In the case of a comparison with a select instruction, try to simplify the
464 /// comparison by seeing whether both branches of the select result in the same
465 /// value. Returns the common value if so, otherwise returns null.
466 /// For example, if we have:
467 /// %tmp = select i1 %cmp, i32 1, i32 2
468 /// %cmp1 = icmp sle i32 %tmp, 3
469 /// We can simplify %cmp1 to true, because both branches of select are
470 /// less than 3. We compose new comparison by substituting %tmp with both
471 /// branches of select and see if it can be simplified.
472 static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
473 Value *RHS, const SimplifyQuery &Q,
474 unsigned MaxRecurse) {
475 // Recursion is always used, so bail out at once if we already hit the limit.
479 // Make sure the select is on the LHS.
480 if (!isa<SelectInst>(LHS)) {
482 Pred = CmpInst::getSwappedPredicate(Pred);
484 assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
485 SelectInst *SI = cast<SelectInst>(LHS);
486 Value *Cond = SI->getCondition();
487 Value *TV = SI->getTrueValue();
488 Value *FV = SI->getFalseValue();
490 // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
491 // Does "cmp TV, RHS" simplify?
492 Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
496 // Does "cmp FV, RHS" simplify?
497 Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
501 // If both sides simplified to the same value, then use it as the result of
502 // the original comparison.
506 // The remaining cases only make sense if the select condition has the same
507 // type as the result of the comparison, so bail out if this is not so.
508 if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
509 return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);
514 /// In the case of a binary operation with an operand that is a PHI instruction,
515 /// try to simplify the binop by seeing whether evaluating it on the incoming
516 /// phi values yields the same result for every value. If so returns the common
517 /// value, otherwise returns null.
518 static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
519 Value *RHS, const SimplifyQuery &Q,
520 unsigned MaxRecurse) {
521 // Recursion is always used, so bail out at once if we already hit the limit.
526 if (isa<PHINode>(LHS)) {
527 PI = cast<PHINode>(LHS);
528 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
529 if (!valueDominatesPHI(RHS, PI, Q.DT))
532 assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
533 PI = cast<PHINode>(RHS);
534 // Bail out if LHS and the phi may be mutually interdependent due to a loop.
535 if (!valueDominatesPHI(LHS, PI, Q.DT))
539 // Evaluate the BinOp on the incoming phi values.
540 Value *CommonValue = nullptr;
541 for (Value *Incoming : PI->incoming_values()) {
542 // If the incoming value is the phi node itself, it can safely be skipped.
543 if (Incoming == PI) continue;
544 Value *V = PI == LHS ?
545 SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
546 SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
547 // If the operation failed to simplify, or simplified to a different value
548 // to previously, then give up.
549 if (!V || (CommonValue && V != CommonValue))
557 /// In the case of a comparison with a PHI instruction, try to simplify the
558 /// comparison by seeing whether comparing with all of the incoming phi values
559 /// yields the same result every time. If so returns the common result,
560 /// otherwise returns null.
561 static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
562 const SimplifyQuery &Q, unsigned MaxRecurse) {
563 // Recursion is always used, so bail out at once if we already hit the limit.
567 // Make sure the phi is on the LHS.
568 if (!isa<PHINode>(LHS)) {
570 Pred = CmpInst::getSwappedPredicate(Pred);
572 assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
573 PHINode *PI = cast<PHINode>(LHS);
575 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
576 if (!valueDominatesPHI(RHS, PI, Q.DT))
579 // Evaluate the BinOp on the incoming phi values.
580 Value *CommonValue = nullptr;
581 for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
582 Value *Incoming = PI->getIncomingValue(u);
583 Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
584 // If the incoming value is the phi node itself, it can safely be skipped.
585 if (Incoming == PI) continue;
586 // Change the context instruction to the "edge" that flows into the phi.
587 // This is important because that is where incoming is actually "evaluated"
588 // even though it is used later somewhere else.
589 Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
591 // If the operation failed to simplify, or simplified to a different value
592 // to previously, then give up.
593 if (!V || (CommonValue && V != CommonValue))
601 static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
602 Value *&Op0, Value *&Op1,
603 const SimplifyQuery &Q) {
604 if (auto *CLHS = dyn_cast<Constant>(Op0)) {
605 if (auto *CRHS = dyn_cast<Constant>(Op1))
606 return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
608 // Canonicalize the constant to the RHS if this is a commutative operation.
609 if (Instruction::isCommutative(Opcode))
615 /// Given operands for an Add, see if we can fold the result.
616 /// If not, this returns null.
617 static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
618 const SimplifyQuery &Q, unsigned MaxRecurse) {
619 if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
622 // X + undef -> undef
623 if (match(Op1, m_Undef()))
627 if (match(Op1, m_Zero()))
630 // If two operands are negative, return 0.
631 if (isKnownNegation(Op0, Op1))
632 return Constant::getNullValue(Op0->getType());
638 if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
639 match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
642 // X + ~X -> -1 since ~X = -X-1
643 Type *Ty = Op0->getType();
644 if (match(Op0, m_Not(m_Specific(Op1))) ||
645 match(Op1, m_Not(m_Specific(Op0))))
646 return Constant::getAllOnesValue(Ty);
648 // add nsw/nuw (xor Y, signmask), signmask --> Y
649 // The no-wrapping add guarantees that the top bit will be set by the add.
650 // Therefore, the xor must be clearing the already set sign bit of Y.
651 if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
652 match(Op0, m_Xor(m_Value(Y), m_SignMask())))
655 // add nuw %x, -1 -> -1, because %x can only be 0.
656 if (IsNUW && match(Op1, m_AllOnes()))
657 return Op1; // Which is -1.
660 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
661 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
664 // Try some generic simplifications for associative operations.
665 if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
669 // Threading Add over selects and phi nodes is pointless, so don't bother.
670 // Threading over the select in "A + select(cond, B, C)" means evaluating
671 // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
672 // only if B and C are equal. If B and C are equal then (since we assume
673 // that operands have already been simplified) "select(cond, B, C)" should
674 // have been simplified to the common value of B and C already. Analysing
675 // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
676 // for threading over phi nodes.
681 Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
682 const SimplifyQuery &Query) {
683 return ::SimplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
686 /// Compute the base pointer and cumulative constant offsets for V.
688 /// This strips all constant offsets off of V, leaving it the base pointer, and
689 /// accumulates the total constant offset applied in the returned constant. It
690 /// returns 0 if V is not a pointer, and returns the constant '0' if there are
691 /// no constant offsets applied.
693 /// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
694 /// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
696 static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
697 bool AllowNonInbounds = false) {
698 assert(V->getType()->isPtrOrPtrVectorTy());
700 Type *IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
701 APInt Offset = APInt::getNullValue(IntIdxTy->getIntegerBitWidth());
703 V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
704 // As that strip may trace through `addrspacecast`, need to sext or trunc
705 // the offset calculated.
706 IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
707 Offset = Offset.sextOrTrunc(IntIdxTy->getIntegerBitWidth());
709 Constant *OffsetIntPtr = ConstantInt::get(IntIdxTy, Offset);
710 if (VectorType *VecTy = dyn_cast<VectorType>(V->getType()))
711 return ConstantVector::getSplat(VecTy->getElementCount(), OffsetIntPtr);
715 /// Compute the constant difference between two pointer values.
716 /// If the difference is not a constant, returns zero.
717 static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
719 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
720 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
722 // If LHS and RHS are not related via constant offsets to the same base
723 // value, there is nothing we can do here.
727 // Otherwise, the difference of LHS - RHS can be computed as:
729 // = (LHSOffset + Base) - (RHSOffset + Base)
730 // = LHSOffset - RHSOffset
731 return ConstantExpr::getSub(LHSOffset, RHSOffset);
734 /// Given operands for a Sub, see if we can fold the result.
735 /// If not, this returns null.
736 static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
737 const SimplifyQuery &Q, unsigned MaxRecurse) {
738 if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
741 // X - undef -> undef
742 // undef - X -> undef
743 if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
744 return UndefValue::get(Op0->getType());
747 if (match(Op1, m_Zero()))
752 return Constant::getNullValue(Op0->getType());
754 // Is this a negation?
755 if (match(Op0, m_Zero())) {
756 // 0 - X -> 0 if the sub is NUW.
758 return Constant::getNullValue(Op0->getType());
760 KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
761 if (Known.Zero.isMaxSignedValue()) {
762 // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
763 // Op1 must be 0 because negating the minimum signed value is undefined.
765 return Constant::getNullValue(Op0->getType());
767 // 0 - X -> X if X is 0 or the minimum signed value.
772 // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
773 // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
774 Value *X = nullptr, *Y = nullptr, *Z = Op1;
775 if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
776 // See if "V === Y - Z" simplifies.
777 if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
778 // It does! Now see if "X + V" simplifies.
779 if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
780 // It does, we successfully reassociated!
784 // See if "V === X - Z" simplifies.
785 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
786 // It does! Now see if "Y + V" simplifies.
787 if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
788 // It does, we successfully reassociated!
794 // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
795 // For example, X - (X + 1) -> -1
797 if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
798 // See if "V === X - Y" simplifies.
799 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
800 // It does! Now see if "V - Z" simplifies.
801 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
802 // It does, we successfully reassociated!
806 // See if "V === X - Z" simplifies.
807 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
808 // It does! Now see if "V - Y" simplifies.
809 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
810 // It does, we successfully reassociated!
816 // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
817 // For example, X - (X - Y) -> Y.
819 if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
820 // See if "V === Z - X" simplifies.
821 if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
822 // It does! Now see if "V + Y" simplifies.
823 if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
824 // It does, we successfully reassociated!
829 // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
830 if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
831 match(Op1, m_Trunc(m_Value(Y))))
832 if (X->getType() == Y->getType())
833 // See if "V === X - Y" simplifies.
834 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
835 // It does! Now see if "trunc V" simplifies.
836 if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
838 // It does, return the simplified "trunc V".
841 // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
842 if (match(Op0, m_PtrToInt(m_Value(X))) &&
843 match(Op1, m_PtrToInt(m_Value(Y))))
844 if (Constant *Result = computePointerDifference(Q.DL, X, Y))
845 return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
848 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
849 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
852 // Threading Sub over selects and phi nodes is pointless, so don't bother.
853 // Threading over the select in "A - select(cond, B, C)" means evaluating
854 // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
855 // only if B and C are equal. If B and C are equal then (since we assume
856 // that operands have already been simplified) "select(cond, B, C)" should
857 // have been simplified to the common value of B and C already. Analysing
858 // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
859 // for threading over phi nodes.
864 Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
865 const SimplifyQuery &Q) {
866 return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
869 /// Given operands for a Mul, see if we can fold the result.
870 /// If not, this returns null.
871 static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
872 unsigned MaxRecurse) {
873 if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
878 if (match(Op1, m_CombineOr(m_Undef(), m_Zero())))
879 return Constant::getNullValue(Op0->getType());
882 if (match(Op1, m_One()))
885 // (X / Y) * Y -> X if the division is exact.
887 if (Q.IIQ.UseInstrInfo &&
889 m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
890 match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
894 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
895 if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
898 // Try some generic simplifications for associative operations.
899 if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
903 // Mul distributes over Add. Try some generic simplifications based on this.
904 if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
908 // If the operation is with the result of a select instruction, check whether
909 // operating on either branch of the select always yields the same value.
910 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
911 if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
915 // If the operation is with the result of a phi instruction, check whether
916 // operating on all incoming values of the phi always yields the same value.
917 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
918 if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
925 Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
926 return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit);
929 /// Check for common or similar folds of integer division or integer remainder.
930 /// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
931 static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) {
932 Type *Ty = Op0->getType();
934 // X / undef -> undef
935 // X % undef -> undef
936 if (match(Op1, m_Undef()))
941 // We don't need to preserve faults!
942 if (match(Op1, m_Zero()))
943 return UndefValue::get(Ty);
945 // If any element of a constant divisor fixed width vector is zero or undef,
946 // the whole op is undef.
947 auto *Op1C = dyn_cast<Constant>(Op1);
948 auto *VTy = dyn_cast<FixedVectorType>(Ty);
950 unsigned NumElts = VTy->getNumElements();
951 for (unsigned i = 0; i != NumElts; ++i) {
952 Constant *Elt = Op1C->getAggregateElement(i);
953 if (Elt && (Elt->isNullValue() || isa<UndefValue>(Elt)))
954 return UndefValue::get(Ty);
960 if (match(Op0, m_Undef()))
961 return Constant::getNullValue(Ty);
965 if (match(Op0, m_Zero()))
966 return Constant::getNullValue(Op0->getType());
971 return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);
975 // If this is a boolean op (single-bit element type), we can't have
976 // division-by-zero or remainder-by-zero, so assume the divisor is 1.
977 // Similarly, if we're zero-extending a boolean divisor, then assume it's a 1.
979 if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1) ||
980 (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
981 return IsDiv ? Op0 : Constant::getNullValue(Ty);
986 /// Given a predicate and two operands, return true if the comparison is true.
987 /// This is a helper for div/rem simplification where we return some other value
988 /// when we can prove a relationship between the operands.
989 static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
990 const SimplifyQuery &Q, unsigned MaxRecurse) {
991 Value *V = SimplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
992 Constant *C = dyn_cast_or_null<Constant>(V);
993 return (C && C->isAllOnesValue());
996 /// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
997 /// to simplify X % Y to X.
998 static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
999 unsigned MaxRecurse, bool IsSigned) {
1000 // Recursion is always used, so bail out at once if we already hit the limit.
1007 // We require that 1 operand is a simple constant. That could be extended to
1008 // 2 variables if we computed the sign bit for each.
1010 // Make sure that a constant is not the minimum signed value because taking
1011 // the abs() of that is undefined.
1012 Type *Ty = X->getType();
1014 if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
1015 // Is the variable divisor magnitude always greater than the constant
1016 // dividend magnitude?
1017 // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
1018 Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
1019 Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
1020 if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
1021 isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
1024 if (match(Y, m_APInt(C))) {
1025 // Special-case: we can't take the abs() of a minimum signed value. If
1026 // that's the divisor, then all we have to do is prove that the dividend
1027 // is also not the minimum signed value.
1028 if (C->isMinSignedValue())
1029 return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);
1031 // Is the variable dividend magnitude always less than the constant
1032 // divisor magnitude?
1033 // |X| < |C| --> X > -abs(C) and X < abs(C)
1034 Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
1035 Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
1036 if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
1037 isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
1043 // IsSigned == false.
1044 // Is the dividend unsigned less than the divisor?
1045 return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
1048 /// These are simplifications common to SDiv and UDiv.
1049 static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1050 const SimplifyQuery &Q, unsigned MaxRecurse) {
1051 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1054 if (Value *V = simplifyDivRem(Op0, Op1, true))
1057 bool IsSigned = Opcode == Instruction::SDiv;
1059 // (X * Y) / Y -> X if the multiplication does not overflow.
1061 if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
1062 auto *Mul = cast<OverflowingBinaryOperator>(Op0);
1063 // If the Mul does not overflow, then we are good to go.
1064 if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
1065 (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)))
1067 // If X has the form X = A / Y, then X * Y cannot overflow.
1068 if ((IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
1069 (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1)))))
1073 // (X rem Y) / Y -> 0
1074 if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1075 (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1076 return Constant::getNullValue(Op0->getType());
1078 // (X /u C1) /u C2 -> 0 if C1 * C2 overflow
1079 ConstantInt *C1, *C2;
1080 if (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
1081 match(Op1, m_ConstantInt(C2))) {
1083 (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
1085 return Constant::getNullValue(Op0->getType());
1088 // If the operation is with the result of a select instruction, check whether
1089 // operating on either branch of the select always yields the same value.
1090 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1091 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1094 // If the operation is with the result of a phi instruction, check whether
1095 // operating on all incoming values of the phi always yields the same value.
1096 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1097 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1100 if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
1101 return Constant::getNullValue(Op0->getType());
1106 /// These are simplifications common to SRem and URem.
1107 static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1108 const SimplifyQuery &Q, unsigned MaxRecurse) {
1109 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1112 if (Value *V = simplifyDivRem(Op0, Op1, false))
1115 // (X % Y) % Y -> X % Y
1116 if ((Opcode == Instruction::SRem &&
1117 match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1118 (Opcode == Instruction::URem &&
1119 match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1122 // (X << Y) % X -> 0
1123 if (Q.IIQ.UseInstrInfo &&
1124 ((Opcode == Instruction::SRem &&
1125 match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
1126 (Opcode == Instruction::URem &&
1127 match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
1128 return Constant::getNullValue(Op0->getType());
1130 // If the operation is with the result of a select instruction, check whether
1131 // operating on either branch of the select always yields the same value.
1132 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1133 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1136 // If the operation is with the result of a phi instruction, check whether
1137 // operating on all incoming values of the phi always yields the same value.
1138 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1139 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1142 // If X / Y == 0, then X % Y == X.
1143 if (isDivZero(Op0, Op1, Q, MaxRecurse, Opcode == Instruction::SRem))
1149 /// Given operands for an SDiv, see if we can fold the result.
1150 /// If not, this returns null.
1151 static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1152 unsigned MaxRecurse) {
1153 // If two operands are negated and no signed overflow, return -1.
1154 if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
1155 return Constant::getAllOnesValue(Op0->getType());
1157 return simplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse);
1160 Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1161 return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit);
1164 /// Given operands for a UDiv, see if we can fold the result.
1165 /// If not, this returns null.
1166 static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1167 unsigned MaxRecurse) {
1168 return simplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse);
1171 Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1172 return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit);
1175 /// Given operands for an SRem, see if we can fold the result.
1176 /// If not, this returns null.
1177 static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1178 unsigned MaxRecurse) {
1179 // If the divisor is 0, the result is undefined, so assume the divisor is -1.
1180 // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
1182 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
1183 return ConstantInt::getNullValue(Op0->getType());
1185 // If the two operands are negated, return 0.
1186 if (isKnownNegation(Op0, Op1))
1187 return ConstantInt::getNullValue(Op0->getType());
1189 return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
1192 Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1193 return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit);
1196 /// Given operands for a URem, see if we can fold the result.
1197 /// If not, this returns null.
1198 static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1199 unsigned MaxRecurse) {
1200 return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
1203 Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1204 return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit);
1207 /// Returns true if a shift by \c Amount always yields undef.
1208 static bool isUndefShift(Value *Amount) {
1209 Constant *C = dyn_cast<Constant>(Amount);
1213 // X shift by undef -> undef because it may shift by the bitwidth.
1214 if (isa<UndefValue>(C))
1217 // Shifting by the bitwidth or more is undefined.
1218 if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
1219 if (CI->getValue().getLimitedValue() >=
1220 CI->getType()->getScalarSizeInBits())
1223 // If all lanes of a vector shift are undefined the whole shift is.
1224 if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
1225 for (unsigned I = 0, E = cast<VectorType>(C->getType())->getNumElements();
1227 if (!isUndefShift(C->getAggregateElement(I)))
1235 /// Given operands for an Shl, LShr or AShr, see if we can fold the result.
1236 /// If not, this returns null.
1237 static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
1238 Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse) {
1239 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1242 // 0 shift by X -> 0
1243 if (match(Op0, m_Zero()))
1244 return Constant::getNullValue(Op0->getType());
1246 // X shift by 0 -> X
1247 // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
1250 if (match(Op1, m_Zero()) ||
1251 (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1254 // Fold undefined shifts.
1255 if (isUndefShift(Op1))
1256 return UndefValue::get(Op0->getType());
1258 // If the operation is with the result of a select instruction, check whether
1259 // operating on either branch of the select always yields the same value.
1260 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1261 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1264 // If the operation is with the result of a phi instruction, check whether
1265 // operating on all incoming values of the phi always yields the same value.
1266 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1267 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1270 // If any bits in the shift amount make that value greater than or equal to
1271 // the number of bits in the type, the shift is undefined.
1272 KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1273 if (Known.One.getLimitedValue() >= Known.getBitWidth())
1274 return UndefValue::get(Op0->getType());
1276 // If all valid bits in the shift amount are known zero, the first operand is
1278 unsigned NumValidShiftBits = Log2_32_Ceil(Known.getBitWidth());
1279 if (Known.countMinTrailingZeros() >= NumValidShiftBits)
1285 /// Given operands for an Shl, LShr or AShr, see if we can
1286 /// fold the result. If not, this returns null.
1287 static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
1288 Value *Op1, bool isExact, const SimplifyQuery &Q,
1289 unsigned MaxRecurse) {
1290 if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse))
1295 return Constant::getNullValue(Op0->getType());
1298 // undef >> X -> undef (if it's exact)
1299 if (match(Op0, m_Undef()))
1300 return isExact ? Op0 : Constant::getNullValue(Op0->getType());
1302 // The low bit cannot be shifted out of an exact shift if it is set.
1304 KnownBits Op0Known = computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
1305 if (Op0Known.One[0])
1312 /// Given operands for an Shl, see if we can fold the result.
1313 /// If not, this returns null.
1314 static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1315 const SimplifyQuery &Q, unsigned MaxRecurse) {
1316 if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
1320 // undef << X -> undef if (if it's NSW/NUW)
1321 if (match(Op0, m_Undef()))
1322 return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());
1324 // (X >> A) << A -> X
1326 if (Q.IIQ.UseInstrInfo &&
1327 match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1330 // shl nuw i8 C, %x -> C iff C has sign bit set.
1331 if (isNUW && match(Op0, m_Negative()))
1333 // NOTE: could use computeKnownBits() / LazyValueInfo,
1334 // but the cost-benefit analysis suggests it isn't worth it.
1339 Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1340 const SimplifyQuery &Q) {
1341 return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
1344 /// Given operands for an LShr, see if we can fold the result.
1345 /// If not, this returns null.
1346 static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1347 const SimplifyQuery &Q, unsigned MaxRecurse) {
1348 if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
1352 // (X << A) >> A -> X
1354 if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1357 // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
1358 // We can return X as we do in the above case since OR alters no bits in X.
1359 // SimplifyDemandedBits in InstCombine can do more general optimization for
1360 // bit manipulation. This pattern aims to provide opportunities for other
1361 // optimizers by supporting a simple but common case in InstSimplify.
1363 const APInt *ShRAmt, *ShLAmt;
1364 if (match(Op1, m_APInt(ShRAmt)) &&
1365 match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
1366 *ShRAmt == *ShLAmt) {
1367 const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1368 const unsigned Width = Op0->getType()->getScalarSizeInBits();
1369 const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
1370 if (ShRAmt->uge(EffWidthY))
1377 Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1378 const SimplifyQuery &Q) {
1379 return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit);
1382 /// Given operands for an AShr, see if we can fold the result.
1383 /// If not, this returns null.
1384 static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1385 const SimplifyQuery &Q, unsigned MaxRecurse) {
1386 if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
1390 // all ones >>a X -> -1
1391 // Do not return Op0 because it may contain undef elements if it's a vector.
1392 if (match(Op0, m_AllOnes()))
1393 return Constant::getAllOnesValue(Op0->getType());
1395 // (X << A) >> A -> X
1397 if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1400 // Arithmetic shifting an all-sign-bit value is a no-op.
1401 unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1402 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1408 Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1409 const SimplifyQuery &Q) {
1410 return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
1413 /// Commuted variants are assumed to be handled by calling this function again
1414 /// with the parameters swapped.
1415 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1416 ICmpInst *UnsignedICmp, bool IsAnd,
1417 const SimplifyQuery &Q) {
1420 ICmpInst::Predicate EqPred;
1421 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1422 !ICmpInst::isEquality(EqPred))
1425 ICmpInst::Predicate UnsignedPred;
1429 if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
1430 if (match(UnsignedICmp,
1431 m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
1432 ICmpInst::isUnsigned(UnsignedPred)) {
1433 // A >=/<= B || (A - B) != 0 <--> true
1434 if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1435 UnsignedPred == ICmpInst::ICMP_ULE) &&
1436 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1437 return ConstantInt::getTrue(UnsignedICmp->getType());
1438 // A </> B && (A - B) == 0 <--> false
1439 if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1440 UnsignedPred == ICmpInst::ICMP_UGT) &&
1441 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1442 return ConstantInt::getFalse(UnsignedICmp->getType());
1444 // A </> B && (A - B) != 0 <--> A </> B
1445 // A </> B || (A - B) != 0 <--> (A - B) != 0
1446 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1447 UnsignedPred == ICmpInst::ICMP_UGT))
1448 return IsAnd ? UnsignedICmp : ZeroICmp;
1450 // A <=/>= B && (A - B) == 0 <--> (A - B) == 0
1451 // A <=/>= B || (A - B) == 0 <--> A <=/>= B
1452 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1453 UnsignedPred == ICmpInst::ICMP_UGE))
1454 return IsAnd ? ZeroICmp : UnsignedICmp;
1457 // Given Y = (A - B)
1458 // Y >= A && Y != 0 --> Y >= A iff B != 0
1459 // Y < A || Y == 0 --> Y < A iff B != 0
1460 if (match(UnsignedICmp,
1461 m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
1462 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1463 EqPred == ICmpInst::ICMP_NE &&
1464 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1465 return UnsignedICmp;
1466 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1467 EqPred == ICmpInst::ICMP_EQ &&
1468 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1469 return UnsignedICmp;
1473 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1474 ICmpInst::isUnsigned(UnsignedPred))
1476 else if (match(UnsignedICmp,
1477 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1478 ICmpInst::isUnsigned(UnsignedPred))
1479 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1483 // X > Y && Y == 0 --> Y == 0 iff X != 0
1484 // X > Y || Y == 0 --> X > Y iff X != 0
1485 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1486 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1487 return IsAnd ? ZeroICmp : UnsignedICmp;
1489 // X <= Y && Y != 0 --> X <= Y iff X != 0
1490 // X <= Y || Y != 0 --> Y != 0 iff X != 0
1491 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1492 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1493 return IsAnd ? UnsignedICmp : ZeroICmp;
1495 // The transforms below here are expected to be handled more generally with
1496 // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1497 // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1498 // these are candidates for removal.
1500 // X < Y && Y != 0 --> X < Y
1501 // X < Y || Y != 0 --> Y != 0
1502 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1503 return IsAnd ? UnsignedICmp : ZeroICmp;
1505 // X >= Y && Y == 0 --> Y == 0
1506 // X >= Y || Y == 0 --> X >= Y
1507 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1508 return IsAnd ? ZeroICmp : UnsignedICmp;
1510 // X < Y && Y == 0 --> false
1511 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1513 return getFalse(UnsignedICmp->getType());
1515 // X >= Y || Y != 0 --> true
1516 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1518 return getTrue(UnsignedICmp->getType());
1523 /// Commuted variants are assumed to be handled by calling this function again
1524 /// with the parameters swapped.
1525 static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1526 ICmpInst::Predicate Pred0, Pred1;
1528 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1529 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1532 // We have (icmp Pred0, A, B) & (icmp Pred1, A, B).
1533 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1534 // can eliminate Op1 from this 'and'.
1535 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1538 // Check for any combination of predicates that are guaranteed to be disjoint.
1539 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1540 (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) ||
1541 (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) ||
1542 (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT))
1543 return getFalse(Op0->getType());
1548 /// Commuted variants are assumed to be handled by calling this function again
1549 /// with the parameters swapped.
1550 static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1551 ICmpInst::Predicate Pred0, Pred1;
1553 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1554 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1557 // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
1558 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1559 // can eliminate Op0 from this 'or'.
1560 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1563 // Check for any combination of predicates that cover the entire range of
1565 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1566 (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
1567 (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
1568 (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
1569 return getTrue(Op0->getType());
1574 /// Test if a pair of compares with a shared operand and 2 constants has an
1575 /// empty set intersection, full set union, or if one compare is a superset of
1577 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1579 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
1580 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1583 const APInt *C0, *C1;
1584 if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1585 !match(Cmp1->getOperand(1), m_APInt(C1)))
1588 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1589 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1591 // For and-of-compares, check if the intersection is empty:
1592 // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1593 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1594 return getFalse(Cmp0->getType());
1596 // For or-of-compares, check if the union is full:
1597 // (icmp X, C0) || (icmp X, C1) --> full set --> true
1598 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1599 return getTrue(Cmp0->getType());
1601 // Is one range a superset of the other?
1602 // If this is and-of-compares, take the smaller set:
1603 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1604 // If this is or-of-compares, take the larger set:
1605 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1606 if (Range0.contains(Range1))
1607 return IsAnd ? Cmp1 : Cmp0;
1608 if (Range1.contains(Range0))
1609 return IsAnd ? Cmp0 : Cmp1;
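/// Try to fold an 'and' of two icmp-ne-zero checks or an 'or' of two
/// icmp-eq-zero checks where one compared value is a masked (possibly
/// ptrtoint'd) form of the other; the unmasked check is redundant, so the
/// masked compare alone is returned.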
1614 static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1,
1616 ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate();
1617 if (!match(Cmp0->getOperand(1), m_Zero()) ||
1618 !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1)
1621 if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ))
1624 // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)".
1625 Value *X = Cmp0->getOperand(0);
1626 Value *Y = Cmp1->getOperand(0);
1628 // If one of the compares is a masked version of a (not) null check, then
1629 // that compare implies the other, so we eliminate the other. Optionally, look
1630 // through a pointer-to-int cast to match a null check of a pointer type.
1632 // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0
1633 // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0
1634 // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0
1635 // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0
1636 if (match(Y, m_c_And(m_Specific(X), m_Value())) ||
1637 match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value())))
1640 // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0
1641 // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0
1642 // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0
1643 // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? & [ptrtoint] Y) != 0
1644 if (match(X, m_c_And(m_Specific(Y), m_Value())) ||
1645 match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value())))
1651 static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1652 const InstrInfoQuery &IIQ) {
1653 // (icmp (add V, C0), C1) & (icmp V, C0)
1654 ICmpInst::Predicate Pred0, Pred1;
1655 const APInt *C0, *C1;
1657 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1660 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1663 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1664 if (AddInst->getOperand(1) != Op1->getOperand(1))
1667 Type *ITy = Op0->getType();
1668 bool isNSW = IIQ.hasNoSignedWrap(AddInst);
1669 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
1671 const APInt Delta = *C1 - *C0;
1672 if (C0->isStrictlyPositive()) {
1674 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1675 return getFalse(ITy);
1676 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1677 return getFalse(ITy);
1680 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1681 return getFalse(ITy);
1682 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1683 return getFalse(ITy);
1686 if (C0->getBoolValue() && isNUW) {
1688 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1689 return getFalse(ITy);
1691 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1692 return getFalse(ITy);
1698 /// Try to eliminate compares with signed or unsigned min/max constants.
1699 static Value *simplifyAndOrOfICmpsWithLimitConst(ICmpInst *Cmp0, ICmpInst *Cmp1,
1701 // Canonicalize an equality compare as Cmp0.
1702 if (Cmp1->isEquality())
1703 std::swap(Cmp0, Cmp1);
1704 if (!Cmp0->isEquality())
1707 // The equality compare must be against a constant. Convert the 'null' pointer
1708 // constant to an integer zero value.
1711 if (match(Cmp0->getOperand(1), m_APInt(C)))
1713 else if (isa<ConstantPointerNull>(Cmp0->getOperand(1)))
1714 MinMaxC = APInt::getNullValue(8);
1718 // The non-equality compare must include a common operand (X). Canonicalize
1719 // the common operand as operand 0 (the predicate is swapped if the common
1720 // operand was operand 1).
1721 ICmpInst::Predicate Pred0 = Cmp0->getPredicate();
1722 Value *X = Cmp0->getOperand(0);
1723 ICmpInst::Predicate Pred1;
1724 if (!match(Cmp1, m_c_ICmp(Pred1, m_Specific(X), m_Value())) ||
1725 ICmpInst::isEquality(Pred1))
1728 // DeMorganize if this is 'or': P0 || P1 --> !P0 && !P1.
1730 Pred0 = ICmpInst::getInversePredicate(Pred0);
1731 Pred1 = ICmpInst::getInversePredicate(Pred1);
1734 // Normalize to unsigned compare and unsigned min/max value.
1735 // Example for 8-bit: -128 + 128 -> 0; 127 + 128 -> 255
1736 if (ICmpInst::isSigned(Pred1)) {
1737 Pred1 = ICmpInst::getUnsignedPredicate(Pred1);
1738 MinMaxC += APInt::getSignedMinValue(MinMaxC.getBitWidth());
1741 // (X != MAX) && (X < Y) --> X < Y
1742 // (X == MAX) || (X >= Y) --> X >= Y
1743 if (MinMaxC.isMaxValue())
1744 if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT)
1747 // (X != MIN) && (X > Y) --> X > Y
1748 // (X == MIN) || (X <= Y) --> X <= Y
1749 if (MinMaxC.isMinValue())
1750 if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_UGT)
1756 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1757 const SimplifyQuery &Q) {
1758 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
1760 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
1763 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
1765 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0))
1768 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1771 if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, true))
1774 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true))
1777 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1779 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1785 static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1786 const InstrInfoQuery &IIQ) {
1787 // (icmp (add V, C0), C1) | (icmp V, C0)
1788 ICmpInst::Predicate Pred0, Pred1;
1789 const APInt *C0, *C1;
1791 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1794 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1797 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1798 if (AddInst->getOperand(1) != Op1->getOperand(1))
1801 Type *ITy = Op0->getType();
1802 bool isNSW = IIQ.hasNoSignedWrap(AddInst);
1803 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
1805 const APInt Delta = *C1 - *C0;
1806 if (C0->isStrictlyPositive()) {
1808 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1809 return getTrue(ITy);
1810 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1811 return getTrue(ITy);
1814 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1815 return getTrue(ITy);
1816 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1817 return getTrue(ITy);
1820 if (C0->getBoolValue() && isNUW) {
1822 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1823 return getTrue(ITy);
1825 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1826 return getTrue(ITy);
1832 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1833 const SimplifyQuery &Q) {
1834 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
1836 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
1839 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
1841 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0))
1844 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1847 if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, false))
1850 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false))
1853 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1855 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1861 static Value *simplifyAndOrOfFCmps(const TargetLibraryInfo *TLI,
1862 FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
1863 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1864 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1865 if (LHS0->getType() != RHS0->getType())
1868 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1869 if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1870 (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
1871 // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y
1872 // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X
1873 // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y
1874 // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X
1875 // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y
1876 // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
1877 // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
1878 // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
1879 if ((isKnownNeverNaN(LHS0, TLI) && (LHS1 == RHS0 || LHS1 == RHS1)) ||
1880 (isKnownNeverNaN(LHS1, TLI) && (LHS0 == RHS0 || LHS0 == RHS1)))
1883 // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
1884 // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X
1885 // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y
1886 // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X
1887 // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y
1888 // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
1889 // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
1890 // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
1891 if ((isKnownNeverNaN(RHS0, TLI) && (RHS1 == LHS0 || RHS1 == LHS1)) ||
1892 (isKnownNeverNaN(RHS1, TLI) && (RHS0 == LHS0 || RHS0 == LHS1)))
1899 static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q,
1900 Value *Op0, Value *Op1, bool IsAnd) {
1901 // Look through casts of the 'and'/'or' operands to find compares.
1902 auto *Cast0 = dyn_cast<CastInst>(Op0);
1903 auto *Cast1 = dyn_cast<CastInst>(Op1);
1904 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1905 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1906 Op0 = Cast0->getOperand(0);
1907 Op1 = Cast1->getOperand(0);
1911 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1912 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1914 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
1915 : simplifyOrOfICmps(ICmp0, ICmp1, Q);
1917 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1918 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1920 V = simplifyAndOrOfFCmps(Q.TLI, FCmp0, FCmp1, IsAnd);
1927 // If we looked through casts, we can only handle a constant simplification
1928 // because we are not allowed to create a cast instruction here.
1929 if (auto *C = dyn_cast<Constant>(V))
1930 return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType());
1935 /// Check that the Op1 is in expected form, i.e.:
1936 /// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
1937 /// %Op1 = extractvalue { i4, i1 } %Agg, 1
1938 static bool omitCheckForZeroBeforeMulWithOverflowInternal(Value *Op1,
1940 auto *Extract = dyn_cast<ExtractValueInst>(Op1);
1941 // We should only be extracting the overflow bit.
1942 if (!Extract || !Extract->getIndices().equals(1))
1944 Value *Agg = Extract->getAggregateOperand();
1945 // This should be a multiplication-with-overflow intrinsic.
1946 if (!match(Agg, m_CombineOr(m_Intrinsic<Intrinsic::umul_with_overflow>(),
1947 m_Intrinsic<Intrinsic::smul_with_overflow>())))
1949 // One of its multipliers should be the value we checked for zero before.
1950 if (!match(Agg, m_CombineOr(m_Argument<0>(m_Specific(X)),
1951 m_Argument<1>(m_Specific(X)))))
1956 /// The @llvm.[us]mul.with.overflow intrinsic could have been folded from some
1957 /// other form of check, e.g. one that was using division; it may have been
1958 /// guarded against division-by-zero. We can drop that check now.
1960 /// %Op0 = icmp ne i4 %X, 0
1961 /// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
1962 /// %Op1 = extractvalue { i4, i1 } %Agg, 1
1963 /// %??? = and i1 %Op0, %Op1
1964 /// We can just return %Op1
1965 static Value *omitCheckForZeroBeforeMulWithOverflow(Value *Op0, Value *Op1) {
1966 ICmpInst::Predicate Pred;
1968 if (!match(Op0, m_ICmp(Pred, m_Value(X), m_Zero())) ||
1969 Pred != ICmpInst::Predicate::ICMP_NE)
1971 // Is Op1 in expected form?
1972 if (!omitCheckForZeroBeforeMulWithOverflowInternal(Op1, X))
1974 // Can omit 'and', and just return the overflow bit.
1978 /// The @llvm.[us]mul.with.overflow intrinsic could have been folded from some
1979 /// other form of check, e.g. one that was using division; it may have been
1980 /// guarded against division-by-zero. We can drop that check now.
1982 /// %Op0 = icmp eq i4 %X, 0
1983 /// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
1984 /// %Op1 = extractvalue { i4, i1 } %Agg, 1
1985 /// %NotOp1 = xor i1 %Op1, true
1986 /// %or = or i1 %Op0, %NotOp1
1987 /// We can just return %NotOp1
1988 static Value *omitCheckForZeroBeforeInvertedMulWithOverflow(Value *Op0,
1990 ICmpInst::Predicate Pred;
1992 if (!match(Op0, m_ICmp(Pred, m_Value(X), m_Zero())) ||
1993 Pred != ICmpInst::Predicate::ICMP_EQ)
1995 // We expect the other operand of the 'or' to be a 'not'.
1997 if (!match(NotOp1, m_Not(m_Value(Op1))))
1999 // Is Op1 in expected form?
2000 if (!omitCheckForZeroBeforeMulWithOverflowInternal(Op1, X))
2002 // Can omit 'or', and just return the inverted overflow bit.
2006 /// Given operands for an And, see if we can fold the result.
2007 /// If not, this returns null.
2008 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2009 unsigned MaxRecurse) {
2010 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
2014 if (match(Op1, m_Undef()))
2015 return Constant::getNullValue(Op0->getType());
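// X & 0 = 0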
2022 if (match(Op1, m_Zero()))
2023 return Constant::getNullValue(Op0->getType());
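// X & -1 = X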
2026 if (match(Op1, m_AllOnes()))
2029 // A & ~A = ~A & A = 0
2030 if (match(Op0, m_Not(m_Specific(Op1))) ||
2031 match(Op1, m_Not(m_Specific(Op0))))
2032 return Constant::getNullValue(Op0->getType());
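// (A | ?) & A = A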
2035 if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
2039 if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
2042 // A mask that only clears known zeros of a shifted value is a no-op.
2046 if (match(Op1, m_APInt(Mask))) {
2047 // If all bits in the inverted and shifted mask are clear:
2048 // and (shl X, ShAmt), Mask --> shl X, ShAmt
2049 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
2050 (~(*Mask)).lshr(*ShAmt).isNullValue())
2053 // If all bits in the inverted and shifted mask are clear:
2054 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2055 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
2056 (~(*Mask)).shl(*ShAmt).isNullValue())
2060 // If we have a multiplication overflow check that is being 'and'ed with a
2061 // check that one of the multipliers is not zero, we can omit the 'and', and
2062 // only keep the overflow check.
2063 if (Value *V = omitCheckForZeroBeforeMulWithOverflow(Op0, Op1))
2065 if (Value *V = omitCheckForZeroBeforeMulWithOverflow(Op1, Op0))
2068 // A & (-A) = A if A is a power of two or zero.
2069 if (match(Op0, m_Neg(m_Specific(Op1))) ||
2070 match(Op1, m_Neg(m_Specific(Op0)))) {
2071 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2074 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2079 // This is a similar pattern used for checking if a value is a power-of-2:
2080 // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
2081 // A & (A - 1) --> 0 (if A is a power-of-2 or 0)
2082 if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
2083 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2084 return Constant::getNullValue(Op1->getType());
2085 if (match(Op1, m_Add(m_Specific(Op0), m_AllOnes())) &&
2086 isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2087 return Constant::getNullValue(Op0->getType());
2089 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
2092 // Try some generic simplifications for associative operations.
2093 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
2097 // And distributes over Or. Try some generic simplifications based on this.
2098 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or,
2102 // And distributes over Xor. Try some generic simplifications based on this.
2103 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor,
2107 // If the operation is with the result of a select instruction, check whether
2108 // operating on either branch of the select always yields the same value.
2109 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
2110 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q,
2114 // If the operation is with the result of a phi instruction, check whether
2115 // operating on all incoming values of the phi always yields the same value.
2116 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2117 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q,
2121 // Assuming the effective width of Y is not larger than A, i.e. all bits
2122 // from X and Y are disjoint in (X << A) | Y,
2123 // if the mask of this AND op covers all bits of X or Y, while it covers
2124 // no bits from the other, we can bypass this AND op. E.g.,
2125 // ((X << A) | Y) & Mask -> Y,
2126 // if Mask = ((1 << effective_width_of(Y)) - 1)
2127 // ((X << A) | Y) & Mask -> X << A,
2128 // if Mask = ((1 << effective_width_of(X)) - 1) << A
2129 // SimplifyDemandedBits in InstCombine can optimize the general case.
2130 // This pattern aims to help other passes for a common case.
2131 Value *Y, *XShifted;
2132 if (match(Op1, m_APInt(Mask)) &&
2133 match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
2136 const unsigned Width = Op0->getType()->getScalarSizeInBits();
2137 const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2138 const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2139 const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
2140 if (EffWidthY <= ShftCnt) {
2141 const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI,
2143 const unsigned EffWidthX = Width - XKnown.countMinLeadingZeros();
2144 const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2145 const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2146 // If the mask is extracting all bits from X or Y as is, we can skip
2148 if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2150 if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2158 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2159 return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
2162 /// Given operands for an Or, see if we can fold the result.
2163 /// If not, this returns null.
2164 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2165 unsigned MaxRecurse) {
2166 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2171 // Do not return Op1 because it may contain undef elements if it's a vector.
2172 if (match(Op1, m_Undef()) || match(Op1, m_AllOnes()))
2173 return Constant::getAllOnesValue(Op0->getType());
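// X | X = X
// X | 0 = X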
2177 if (Op0 == Op1 || match(Op1, m_Zero()))
2180 // A | ~A = ~A | A = -1
2181 if (match(Op0, m_Not(m_Specific(Op1))) ||
2182 match(Op1, m_Not(m_Specific(Op0))))
2183 return Constant::getAllOnesValue(Op0->getType());
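// (A & ?) | A = A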
2186 if (match(Op0, m_c_And(m_Specific(Op1), m_Value())))
2190 if (match(Op1, m_c_And(m_Specific(Op0), m_Value())))
2193 // ~(A & ?) | A = -1
2194 if (match(Op0, m_Not(m_c_And(m_Specific(Op1), m_Value()))))
2195 return Constant::getAllOnesValue(Op1->getType());
2197 // A | ~(A & ?) = -1
2198 if (match(Op1, m_Not(m_c_And(m_Specific(Op0), m_Value()))))
2199 return Constant::getAllOnesValue(Op0->getType());
2202 // (A & ~B) | (A ^ B) -> (A ^ B)
2203 // (~B & A) | (A ^ B) -> (A ^ B)
2204 // (A & ~B) | (B ^ A) -> (B ^ A)
2205 // (~B & A) | (B ^ A) -> (B ^ A)
2206 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
2207 (match(Op0, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
2208 match(Op0, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
2211 // Commute the 'or' operands.
2212 // (A ^ B) | (A & ~B) -> (A ^ B)
2213 // (A ^ B) | (~B & A) -> (A ^ B)
2214 // (B ^ A) | (A & ~B) -> (B ^ A)
2215 // (B ^ A) | (~B & A) -> (B ^ A)
2216 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2217 (match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
2218 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
2221 // (A & B) | (~A ^ B) -> (~A ^ B)
2222 // (B & A) | (~A ^ B) -> (~A ^ B)
2223 // (A & B) | (B ^ ~A) -> (B ^ ~A)
2224 // (B & A) | (B ^ ~A) -> (B ^ ~A)
2225 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2226 (match(Op1, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
2227 match(Op1, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
2230 // (~A ^ B) | (A & B) -> (~A ^ B)
2231 // (~A ^ B) | (B & A) -> (~A ^ B)
2232 // (B ^ ~A) | (A & B) -> (B ^ ~A)
2233 // (B ^ ~A) | (B & A) -> (B ^ ~A)
2234 if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
2235 (match(Op0, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
2236 match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
2239 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2242 // If we have an inverted multiplication overflow check that is being 'or'ed
2243 // with a check that one of the multipliers is zero, we can omit the 'or' and
2244 // keep only the inverted overflow check.
2245 if (Value *V = omitCheckForZeroBeforeInvertedMulWithOverflow(Op0, Op1))
2247 if (Value *V = omitCheckForZeroBeforeInvertedMulWithOverflow(Op1, Op0))
2250 // Try some generic simplifications for associative operations.
2251 if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
2255 // Or distributes over And. Try some generic simplifications based on this.
2256 if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q,
2260 // If the operation is with the result of a select instruction, check whether
2261 // operating on either branch of the select always yields the same value.
2262 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
2263 if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
2267 // (A & C1)|(B & C2)
2268 const APInt *C1, *C2;
2269 if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2270 match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2272 // (A & C1)|(B & C2)
2273 // If we have: ((V + N) & C1) | (V & C2)
2274 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2275 // replace with V+N.
2277 if (C2->isMask() && // C2 == 0+1+
2278 match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2279 // Add commutes, try both ways.
2280 if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2283 // Or commutes, try both ways.
2285 match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2286 // Add commutes, try both ways.
2287 if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2293 // If the operation is with the result of a phi instruction, check whether
2294 // operating on all incoming values of the phi always yields the same value.
2295 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2296 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2302 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2303 return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit);
2306 /// Given operands for a Xor, see if we can fold the result.
2307 /// If not, this returns null.
2308 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2309 unsigned MaxRecurse) {
2310 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2313 // A ^ undef -> undef
2314 if (match(Op1, m_Undef()))
2318 if (match(Op1, m_Zero()))
2323 return Constant::getNullValue(Op0->getType());
2325 // A ^ ~A = ~A ^ A = -1
2326 if (match(Op0, m_Not(m_Specific(Op1))) ||
2327 match(Op1, m_Not(m_Specific(Op0))))
2328 return Constant::getAllOnesValue(Op0->getType());
2330 // Try some generic simplifications for associative operations.
2331 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q,
2335 // Threading Xor over selects and phi nodes is pointless, so don't bother.
2336 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2337 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2338 // only if B and C are equal. If B and C are equal then (since we assume
2339 // that operands have already been simplified) "select(cond, B, C)" should
2340 // have been simplified to the common value of B and C already. Analysing
2341 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
2342 // for threading over phi nodes.
2347 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2348 return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit);
2352 static Type *GetCompareTy(Value *Op) {
2353 return CmpInst::makeCmpResultType(Op->getType());
2356 /// Rummage around inside V looking for something equivalent to the comparison
2357 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
2358 /// Helper function for analyzing max/min idioms.
2359 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2360 Value *LHS, Value *RHS) {
2361 SelectInst *SI = dyn_cast<SelectInst>(V);
2364 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2367 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2368 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2370 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2371 LHS == CmpRHS && RHS == CmpLHS)
2376 // A significant optimization not implemented here is assuming that alloca
2377 // addresses are not equal to incoming argument values. They don't *alias*,
2378 // as we say, but that doesn't mean they aren't equal, so we take a
2379 // conservative approach.
2381 // This is inspired in part by C++11 5.10p1:
2382 // "Two pointers of the same type compare equal if and only if they are both
2383 // null, both point to the same function, or both represent the same address."
2386 // This is pretty permissive.
2388 // It's also partly due to C11 6.5.9p6:
2389 // "Two pointers compare equal if and only if both are null pointers, both are
2390 // pointers to the same object (including a pointer to an object and a
2391 // subobject at its beginning) or function, both are pointers to one past the
2392 // last element of the same array object, or one is a pointer to one past the
2393 // end of one array object and the other is a pointer to the start of a
2394 // different array object that happens to immediately follow the first array
2395 // object in the address space."
2397 // C11's version is more restrictive; however, there's no reason why an argument
2398 // couldn't be a one-past-the-end value for a stack object in the caller and be
2399 // equal to the beginning of a stack object in the callee.
2401 // If the C and C++ standards are ever made sufficiently restrictive in this
2402 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2403 // this optimization.
2405 static Constant *computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
2406 const DominatorTree *DT, CmpInst::Predicate Pred,
2407 AssumptionCache *AC, const Instruction *CxtI,
2408 const InstrInfoQuery &IIQ, Value *LHS, Value *RHS) {
2409 // First, skip past any trivial no-ops.
2410 LHS = LHS->stripPointerCasts();
2411 RHS = RHS->stripPointerCasts();
2413 // A non-null pointer is not equal to a null pointer.
2414 if (isa<ConstantPointerNull>(RHS) && ICmpInst::isEquality(Pred) &&
2415 llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr,
2417 return ConstantInt::get(GetCompareTy(LHS),
2418 !CmpInst::isTrueWhenEqual(Pred));
2420 // We can only fold certain predicates on pointer comparisons.
2425 // Equality comparisons are easy to fold.
2426 case CmpInst::ICMP_EQ:
2427 case CmpInst::ICMP_NE:
2430 // We can only handle unsigned relational comparisons because 'inbounds' on
2431 // a GEP only protects against unsigned wrapping.
2432 case CmpInst::ICMP_UGT:
2433 case CmpInst::ICMP_UGE:
2434 case CmpInst::ICMP_ULT:
2435 case CmpInst::ICMP_ULE:
2436 // However, we have to switch them to their signed variants to handle
2437 // negative indices from the base pointer.
2438 Pred = ICmpInst::getSignedPredicate(Pred);
2442 // Strip off any constant offsets so that we can reason about them.
2443 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2444 // here and compare base addresses like AliasAnalysis does, however there are
2445 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2446 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2447 // doesn't need to guarantee pointer inequality when it says NoAlias.
2448 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
2449 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
2451 // If LHS and RHS are related via constant offsets to the same base
2452 // value, we can replace it with an icmp which just compares the offsets.
2454 return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
2456 // Various optimizations for (in)equality comparisons.
2457 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2458 // Different non-empty allocations that exist at the same time have
2459 // different addresses (if the program can tell). Global variables always
2460 // exist, so they always exist during the lifetime of each other and all
2461 // allocas. Two different allocas usually have different addresses...
2463 // However, if there's an @llvm.stackrestore dynamically in between two
2464 // allocas, they may have the same address. It's tempting to reduce the
2465 // scope of the problem by only looking at *static* allocas here. That would
2466 // cover the majority of allocas while significantly reducing the likelihood
2467 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2468 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2469 // an entry block. Also, if we have a block that's not attached to a
2470 // function, we can't tell if it's "static" under the current definition.
2471 // Theoretically, this problem could be fixed by creating a new kind of
2472 // instruction specifically for static allocas. Such a new instruction
2473 // could be required to be at the top of the entry block, thus preventing it
2474 // from being subject to a @llvm.stackrestore. Instcombine could even
2475 // convert regular allocas into these special allocas. It'd be nifty.
2476 // However, until then, this problem remains open.
2478 // So, we'll assume that two non-empty allocas have different addresses for now.
2481 // With all that, if the offsets are within the bounds of their allocations
2482 // (and not one-past-the-end! so we can't use inbounds!), and their
2483 // allocations aren't the same, the pointers are not equal.
2485 // Note that it's not necessary to check for LHS being a global variable
2486 // address, due to canonicalization and constant folding.
2487 if (isa<AllocaInst>(LHS) &&
2488 (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2489 ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
2490 ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
2491 uint64_t LHSSize, RHSSize;
2492 ObjectSizeOpts Opts;
2493 Opts.NullIsUnknownSize =
2494 NullPointerIsDefined(cast<AllocaInst>(LHS)->getFunction());
2495 if (LHSOffsetCI && RHSOffsetCI &&
2496 getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
2497 getObjectSize(RHS, RHSSize, DL, TLI, Opts)) {
2498 const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
2499 const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2500 if (!LHSOffsetValue.isNegative() &&
2501 !RHSOffsetValue.isNegative() &&
2502 LHSOffsetValue.ult(LHSSize) &&
2503 RHSOffsetValue.ult(RHSSize)) {
2504 return ConstantInt::get(GetCompareTy(LHS),
2505 !CmpInst::isTrueWhenEqual(Pred));
2509 // Repeat the above check but this time without depending on DataLayout
2510 // or being able to compute a precise size.
2511 if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2512 !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2513 LHSOffset->isNullValue() &&
2514 RHSOffset->isNullValue())
2515 return ConstantInt::get(GetCompareTy(LHS),
2516 !CmpInst::isTrueWhenEqual(Pred));
2519 // Even if a non-inbounds GEP occurs along the path we can still optimize
2520 // equality comparisons concerning the result. We avoid walking the whole
2521 // chain again by starting where the last calls to
2522 // stripAndComputeConstantOffsets left off and accumulating the offsets.
2523 Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
2524 Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
2526 return ConstantExpr::getICmp(Pred,
2527 ConstantExpr::getAdd(LHSOffset, LHSNoBound),
2528 ConstantExpr::getAdd(RHSOffset, RHSNoBound));
2530 // If one side of the equality comparison must come from a noalias call
2531 // (meaning a system memory allocation function), and the other side must
2532 // come from a pointer that cannot overlap with dynamically-allocated
2533 // memory within the lifetime of the current function (allocas, byval
2534 // arguments, globals), then determine the comparison result here.
2535 SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2536 GetUnderlyingObjects(LHS, LHSUObjs, DL);
2537 GetUnderlyingObjects(RHS, RHSUObjs, DL);
2539 // Is the set of underlying objects all noalias calls?
2540 auto IsNAC = [](ArrayRef<const Value *> Objects) {
2541 return all_of(Objects, isNoAliasCall);
2544 // Is the set of underlying objects all things which must be disjoint from
2545 // noalias calls? For allocas, we consider only static ones (dynamic
2546 // allocas might be transformed into calls to malloc not simultaneously
2547 // live with the compared-to allocation). For globals, we exclude symbols
2548 // that might be resolved lazily to symbols in another dynamically-loaded
2549 // library (and, thus, could be malloc'ed by the implementation).
2550 auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2551 return all_of(Objects, [](const Value *V) {
2552 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2553 return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2554 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2555 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2556 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2557 !GV->isThreadLocal();
2558 if (const Argument *A = dyn_cast<Argument>(V))
2559 return A->hasByValAttr();
2564 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2565 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2566 return ConstantInt::get(GetCompareTy(LHS),
2567 !CmpInst::isTrueWhenEqual(Pred));
2569 // Fold comparisons for a non-escaping pointer even if the allocation call
2570 // cannot be elided. We cannot fold malloc comparison to null. Also, the
2571 // dynamic allocation call could be either of the operands.
2572 Value *MI = nullptr;
2573 if (isAllocLikeFn(LHS, TLI) &&
2574 llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
2576 else if (isAllocLikeFn(RHS, TLI) &&
2577 llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
2579 // FIXME: We should also fold the compare when the pointer escapes, but the
2580 // compare dominates the pointer escape
2581 if (MI && !PointerMayBeCaptured(MI, true, true))
2582 return ConstantInt::get(GetCompareTy(LHS),
2583 CmpInst::isFalseWhenEqual(Pred));
2590 /// Fold an icmp when its operands have i1 scalar type.
2591 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2592 Value *RHS, const SimplifyQuery &Q) {
2593 Type *ITy = GetCompareTy(LHS); // The return type.
2594 Type *OpTy = LHS->getType(); // The operand type.
2595 if (!OpTy->isIntOrIntVectorTy(1))
2598 // A boolean compared to true/false can be simplified in 14 out of the 20
2599 // (10 predicates * 2 constants) possible combinations. Cases not handled here
2600 // require a 'not' of the LHS, so those must be transformed in InstCombine.
2601 if (match(RHS, m_Zero())) {
2603 case CmpInst::ICMP_NE: // X != 0 -> X
2604 case CmpInst::ICMP_UGT: // X >u 0 -> X
2605 case CmpInst::ICMP_SLT: // X <s 0 -> X
2608 case CmpInst::ICMP_ULT: // X <u 0 -> false
2609 case CmpInst::ICMP_SGT: // X >s 0 -> false
2610 return getFalse(ITy);
2612 case CmpInst::ICMP_UGE: // X >=u 0 -> true
2613 case CmpInst::ICMP_SLE: // X <=s 0 -> true
2614 return getTrue(ITy);
2618 } else if (match(RHS, m_One())) {
2620 case CmpInst::ICMP_EQ: // X == 1 -> X
2621 case CmpInst::ICMP_UGE: // X >=u 1 -> X
2622 case CmpInst::ICMP_SLE: // X <=s -1 -> X
2625 case CmpInst::ICMP_UGT: // X >u 1 -> false
2626 case CmpInst::ICMP_SLT: // X <s -1 -> false
2627 return getFalse(ITy);
2629 case CmpInst::ICMP_ULE: // X <=u 1 -> true
2630 case CmpInst::ICMP_SGE: // X >=s -1 -> true
2631 return getTrue(ITy);
2640 case ICmpInst::ICMP_UGE:
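/// For unsigned i1 values (0 and 1), LHS >=u RHS fails only when LHS == 0
/// and RHS == 1, so it holds exactly when RHS implies LHS.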
2641 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false))
2642 return getTrue(ITy);
2644 case ICmpInst::ICMP_SGE:
2645 /// For signed comparison, the values for an i1 are 0 and -1
2646 /// respectively. This maps into a truth table of:
2647 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2648 /// 0 | 0 | 1 (0 >= 0) | 1
2649 /// 0 | 1 | 1 (0 >= -1) | 1
2650 /// 1 | 0 | 0 (-1 >= 0) | 0
2651 /// 1 | 1 | 1 (-1 >= -1) | 1
2652 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2653 return getTrue(ITy);
2655 case ICmpInst::ICMP_ULE:
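/// Likewise, LHS <=u RHS fails only when LHS == 1 and RHS == 0, so it holds
/// exactly when LHS implies RHS.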
2656 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2657 return getTrue(ITy);
2664 /// Try hard to fold icmp with zero RHS because this is a common case.
2665 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2666 Value *RHS, const SimplifyQuery &Q) {
2667 if (!match(RHS, m_Zero()))
2670 Type *ITy = GetCompareTy(LHS); // The return type.
2673 llvm_unreachable("Unknown ICmp predicate!");
2674 case ICmpInst::ICMP_ULT:
2675 return getFalse(ITy);
2676 case ICmpInst::ICMP_UGE:
2677 return getTrue(ITy);
2678 case ICmpInst::ICMP_EQ:
2679 case ICmpInst::ICMP_ULE:
2680 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2681 return getFalse(ITy);
2683 case ICmpInst::ICMP_NE:
2684 case ICmpInst::ICMP_UGT:
2685 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2686 return getTrue(ITy);
2688 case ICmpInst::ICMP_SLT: {
2689 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2690 if (LHSKnown.isNegative())
2691 return getTrue(ITy);
2692 if (LHSKnown.isNonNegative())
2693 return getFalse(ITy);
2696 case ICmpInst::ICMP_SLE: {
2697 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2698 if (LHSKnown.isNegative())
2699 return getTrue(ITy);
2700 if (LHSKnown.isNonNegative() &&
2701 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2702 return getFalse(ITy);
2705 case ICmpInst::ICMP_SGE: {
2706 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2707 if (LHSKnown.isNegative())
2708 return getFalse(ITy);
2709 if (LHSKnown.isNonNegative())
2710 return getTrue(ITy);
2713 case ICmpInst::ICMP_SGT: {
2714 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2715 if (LHSKnown.isNegative())
2716 return getFalse(ITy);
2717 if (LHSKnown.isNonNegative() &&
2718 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2719 return getTrue(ITy);
2727 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
2728 Value *RHS, const InstrInfoQuery &IIQ) {
2729 Type *ITy = GetCompareTy(RHS); // The return type.
2732 // Sign-bit checks can be optimized to true/false after unsigned
2733 // floating-point casts:
2734 // icmp slt (bitcast (uitofp X)), 0 --> false
2735 // icmp sgt (bitcast (uitofp X)), -1 --> true
2736 if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) {
2737 if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
2738 return ConstantInt::getFalse(ITy);
2739 if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
2740 return ConstantInt::getTrue(ITy);
2744 if (!match(RHS, m_APInt(C)))
2747 // Rule out tautological comparisons (e.g., ult 0 or uge 0).
2748 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
2749 if (RHS_CR.isEmptySet())
2750 return ConstantInt::getFalse(ITy);
2751 if (RHS_CR.isFullSet())
2752 return ConstantInt::getTrue(ITy);
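// Otherwise, compute the range of values LHS can take directly. If that
// range lies entirely inside the region where the predicate holds, the
// compare is always true; if it lies entirely inside the inverse region,
// it is always false.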
2754 ConstantRange LHS_CR = computeConstantRange(LHS, IIQ.UseInstrInfo);
2755 if (!LHS_CR.isFullSet()) {
2756 if (RHS_CR.contains(LHS_CR))
2757 return ConstantInt::getTrue(ITy);
2758 if (RHS_CR.inverse().contains(LHS_CR))
2759 return ConstantInt::getFalse(ITy);
2765 /// TODO: A large part of this logic is duplicated in InstCombine's
2766 /// foldICmpBinOp(). We should be able to share that and avoid the code
2768 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
2769 Value *RHS, const SimplifyQuery &Q,
2770 unsigned MaxRecurse) {
2771 Type *ITy = GetCompareTy(LHS); // The return type.
2773 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
2774 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
2775 if (MaxRecurse && (LBO || RBO)) {
2776 // Analyze the case when either LHS or RHS is an add instruction.
2777 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2778 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
2779 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
2780 if (LBO && LBO->getOpcode() == Instruction::Add) {
2781 A = LBO->getOperand(0);
2782 B = LBO->getOperand(1);
2784 ICmpInst::isEquality(Pred) ||
2785 (CmpInst::isUnsigned(Pred) &&
2786 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
2787 (CmpInst::isSigned(Pred) &&
2788 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
2790 if (RBO && RBO->getOpcode() == Instruction::Add) {
2791 C = RBO->getOperand(0);
2792 D = RBO->getOperand(1);
2794 ICmpInst::isEquality(Pred) ||
2795 (CmpInst::isUnsigned(Pred) &&
2796 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
2797 (CmpInst::isSigned(Pred) &&
2798 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
2801 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
2802 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
2803 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
2804 Constant::getNullValue(RHS->getType()), Q,
2808 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
2809 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
2811 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
2812 C == LHS ? D : C, Q, MaxRecurse - 1))
2815 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
2816 if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem &&
2818 // Determine Y and Z in the form icmp (X+Y), (X+Z).
2821 // C + B == C + D -> B == D
2824 } else if (A == D) {
2825 // D + B == C + D -> B == C
2828 } else if (B == C) {
2829 // A + C == C + D -> A == D
2834 // A + D == C + D -> A == C
2838 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
2845 // icmp pred (or X, Y), X
2846 if (LBO && match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
2847 if (Pred == ICmpInst::ICMP_ULT)
2848 return getFalse(ITy);
2849 if (Pred == ICmpInst::ICMP_UGE)
2850 return getTrue(ITy);
2852 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
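// If X (the RHS) is non-negative while Y is negative, the 'or' has its sign
// bit set, so (X | Y) <s X. If X is negative or Y is non-negative, (X | Y)
// has the same sign as X and only adds set bits, so (X | Y) >=s X.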
2853 KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2854 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2855 if (RHSKnown.isNonNegative() && YKnown.isNegative())
2856 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
2857 if (RHSKnown.isNegative() || YKnown.isNonNegative())
2858 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
2861 // icmp pred X, (or X, Y)
2862 if (RBO && match(RBO, m_c_Or(m_Value(Y), m_Specific(LHS)))) {
2863 if (Pred == ICmpInst::ICMP_ULE)
2864 return getTrue(ITy);
2865 if (Pred == ICmpInst::ICMP_UGT)
2866 return getFalse(ITy);
2868 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE) {
2869 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2870 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2871 if (LHSKnown.isNonNegative() && YKnown.isNegative())
2872 return Pred == ICmpInst::ICMP_SGT ? getTrue(ITy) : getFalse(ITy);
2873 if (LHSKnown.isNegative() || YKnown.isNonNegative())
2874 return Pred == ICmpInst::ICMP_SGT ? getFalse(ITy) : getTrue(ITy);
2879 // icmp pred (and X, Y), X
2880 if (LBO && match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) {
2881 if (Pred == ICmpInst::ICMP_UGT)
2882 return getFalse(ITy);
2883 if (Pred == ICmpInst::ICMP_ULE)
2884 return getTrue(ITy);
2886 // icmp pred X, (and X, Y)
2887 if (RBO && match(RBO, m_c_And(m_Value(), m_Specific(LHS)))) {
2888 if (Pred == ICmpInst::ICMP_UGE)
2889 return getTrue(ITy);
2890 if (Pred == ICmpInst::ICMP_ULT)
2891 return getFalse(ITy);
2894 // 0 - (zext X) pred C
2895 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
2896 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
2897 if (RHSC->getValue().isStrictlyPositive()) {
2898 if (Pred == ICmpInst::ICMP_SLT)
2899 return ConstantInt::getTrue(RHSC->getContext());
2900 if (Pred == ICmpInst::ICMP_SGE)
2901 return ConstantInt::getFalse(RHSC->getContext());
2902 if (Pred == ICmpInst::ICMP_EQ)
2903 return ConstantInt::getFalse(RHSC->getContext());
2904 if (Pred == ICmpInst::ICMP_NE)
2905 return ConstantInt::getTrue(RHSC->getContext());
2907 if (RHSC->getValue().isNonNegative()) {
2908 if (Pred == ICmpInst::ICMP_SLE)
2909 return ConstantInt::getTrue(RHSC->getContext());
2910 if (Pred == ICmpInst::ICMP_SGT)
2911 return ConstantInt::getFalse(RHSC->getContext());
2916 // icmp pred (urem X, Y), Y
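// The result of a urem is always unsigned-less-than a non-zero divisor Y, so
// the unsigned and equality predicates fold directly; the signed predicates
// fold only when Y is known to be non-negative.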
2917 if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
2921 case ICmpInst::ICMP_SGT:
2922 case ICmpInst::ICMP_SGE: {
2923 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2924 if (!Known.isNonNegative())
2928 case ICmpInst::ICMP_EQ:
2929 case ICmpInst::ICMP_UGT:
2930 case ICmpInst::ICMP_UGE:
2931 return getFalse(ITy);
2932 case ICmpInst::ICMP_SLT:
2933 case ICmpInst::ICMP_SLE: {
2934 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2935 if (!Known.isNonNegative())
2939 case ICmpInst::ICMP_NE:
2940 case ICmpInst::ICMP_ULT:
2941 case ICmpInst::ICMP_ULE:
2942 return getTrue(ITy);
2946 // icmp pred X, (urem Y, X)
2947 if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) {
2951 case ICmpInst::ICMP_SGT:
2952 case ICmpInst::ICMP_SGE: {
2953 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2954 if (!Known.isNonNegative())
2958 case ICmpInst::ICMP_NE:
2959 case ICmpInst::ICMP_UGT:
2960 case ICmpInst::ICMP_UGE:
2961 return getTrue(ITy);
2962 case ICmpInst::ICMP_SLT:
2963 case ICmpInst::ICMP_SLE: {
2964 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2965 if (!Known.isNonNegative())
2969 case ICmpInst::ICMP_EQ:
2970 case ICmpInst::ICMP_ULT:
2971 case ICmpInst::ICMP_ULE:
2972 return getFalse(ITy);
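// Logical shift right and unsigned division never increase an unsigned
// value, so (X >>u Y) and (X /u Y) are always <=u X.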
2978 if (LBO && (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
2979 match(LBO, m_UDiv(m_Specific(RHS), m_Value())))) {
2980 // icmp pred (X op Y), X
2981 if (Pred == ICmpInst::ICMP_UGT)
2982 return getFalse(ITy);
2983 if (Pred == ICmpInst::ICMP_ULE)
2984 return getTrue(ITy);
2989 if (RBO && (match(RBO, m_LShr(m_Specific(LHS), m_Value())) ||
2990 match(RBO, m_UDiv(m_Specific(LHS), m_Value())))) {
2991 // icmp pred X, (X op Y)
2992 if (Pred == ICmpInst::ICMP_ULT)
2993 return getFalse(ITy);
2994 if (Pred == ICmpInst::ICMP_UGE)
2995 return getTrue(ITy);
3002 // Handle (CI2 << X) == CI and (CI2 << X) != CI, where CI2 is a power of 2 and CI isn't.
3003 if (auto *CI = dyn_cast<ConstantInt>(RHS)) {
3004 const APInt *CI2Val, *CIVal = &CI->getValue();
3005 if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) &&
3006 CI2Val->isPowerOf2()) {
3007 if (!CIVal->isPowerOf2()) {
3008 // CI2 << X can equal zero in some circumstances, so this simplification
3009 // is unsafe if CI is zero.
3011 // We know it is safe if:
3012 // - The shift is nsw: we can't shift out the one bit.
3013 // - The shift is nuw: we can't shift out the one bit.
3016 if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3017 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3018 CI2Val->isOneValue() || !CI->isZero()) {
3019 if (Pred == ICmpInst::ICMP_EQ)
3020 return ConstantInt::getFalse(RHS->getContext());
3021 if (Pred == ICmpInst::ICMP_NE)
3022 return ConstantInt::getTrue(RHS->getContext());
3025 if (CIVal->isSignMask() && CI2Val->isOneValue()) {
3026 if (Pred == ICmpInst::ICMP_UGT)
3027 return ConstantInt::getFalse(RHS->getContext());
3028 if (Pred == ICmpInst::ICMP_ULE)
3029 return ConstantInt::getTrue(RHS->getContext());
3034 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
3035 LBO->getOperand(1) == RBO->getOperand(1)) {
3036 switch (LBO->getOpcode()) {
3039 case Instruction::UDiv:
3040 case Instruction::LShr:
3041 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3042 !Q.IIQ.isExact(RBO))
3044 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3045 RBO->getOperand(0), Q, MaxRecurse - 1))
3048 case Instruction::SDiv:
3049 if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3050 !Q.IIQ.isExact(RBO))
3052 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3053 RBO->getOperand(0), Q, MaxRecurse - 1))
3056 case Instruction::AShr:
3057 if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3059 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3060 RBO->getOperand(0), Q, MaxRecurse - 1))
3063 case Instruction::Shl: {
3064 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3065 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3068 if (!NSW && ICmpInst::isSigned(Pred))
3070 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3071 RBO->getOperand(0), Q, MaxRecurse - 1))
3080 /// Simplify integer comparisons where at least one operand of the compare
3081 /// matches an integer min/max idiom.
3082 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
3083 Value *RHS, const SimplifyQuery &Q,
3084 unsigned MaxRecurse) {
3085 Type *ITy = GetCompareTy(LHS); // The return type.
3087 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
3088 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3090 // Signed variants on "max(a,b)>=a -> true".
3091 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3093 std::swap(A, B); // smax(A, B) pred A.
3094 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3095 // We analyze this as smax(A, B) pred A.
3097 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
3098 (A == LHS || B == LHS)) {
3100 std::swap(A, B); // A pred smax(A, B).
3101 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3102 // We analyze this as smax(A, B) swapped-pred A.
3103 P = CmpInst::getSwappedPredicate(Pred);
3104 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3105 (A == RHS || B == RHS)) {
3107 std::swap(A, B); // smin(A, B) pred A.
3108 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3109 // We analyze this as smax(-A, -B) swapped-pred -A.
3110 // Note that we do not need to actually form -A or -B thanks to EqP.
3111 P = CmpInst::getSwappedPredicate(Pred);
3112 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
3113 (A == LHS || B == LHS)) {
3115 std::swap(A, B); // A pred smin(A, B).
3116 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3117 // We analyze this as smax(-A, -B) pred -A.
3118 // Note that we do not need to actually form -A or -B thanks to EqP.
3121 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3122 // Cases correspond to "max(A, B) p A".
3126 case CmpInst::ICMP_EQ:
3127 case CmpInst::ICMP_SLE:
3128 // Equivalent to "A EqP B". This may be the same as the condition tested
3129 // in the max/min; if so, we can just return that.
3130 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
3132 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
3134 // Otherwise, see if "A EqP B" simplifies.
3136 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3139 case CmpInst::ICMP_NE:
3140 case CmpInst::ICMP_SGT: {
3141 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3142 // Equivalent to "A InvEqP B". This may be the same as the condition
3143 // tested in the max/min; if so, we can just return that.
3144 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
3146 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
3148 // Otherwise, see if "A InvEqP B" simplifies.
3150 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3154 case CmpInst::ICMP_SGE:
3156 return getTrue(ITy);
3157 case CmpInst::ICMP_SLT:
3159 return getFalse(ITy);
3163 // Unsigned variants on "max(a,b)>=a -> true".
3164 P = CmpInst::BAD_ICMP_PREDICATE;
3165 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3167 std::swap(A, B); // umax(A, B) pred A.
3168 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3169 // We analyze this as umax(A, B) pred A.
3171 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3172 (A == LHS || B == LHS)) {
3174 std::swap(A, B); // A pred umax(A, B).
3175 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3176 // We analyze this as umax(A, B) swapped-pred A.
3177 P = CmpInst::getSwappedPredicate(Pred);
3178 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3179 (A == RHS || B == RHS)) {
3181 std::swap(A, B); // umin(A, B) pred A.
3182 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3183 // We analyze this as umax(-A, -B) swapped-pred -A.
3184 // Note that we do not need to actually form -A or -B thanks to EqP.
3185 P = CmpInst::getSwappedPredicate(Pred);
3186 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3187 (A == LHS || B == LHS)) {
3189 std::swap(A, B); // A pred umin(A, B).
3190 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3191 // We analyze this as umax(-A, -B) pred -A.
3192 // Note that we do not need to actually form -A or -B thanks to EqP.
3195 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3196 // Cases correspond to "max(A, B) p A".
3200 case CmpInst::ICMP_EQ:
3201 case CmpInst::ICMP_ULE:
3202 // Equivalent to "A EqP B". This may be the same as the condition tested
3203 // in the max/min; if so, we can just return that.
3204 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
3206 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
3208 // Otherwise, see if "A EqP B" simplifies.
3210 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3213 case CmpInst::ICMP_NE:
3214 case CmpInst::ICMP_UGT: {
3215 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3216 // Equivalent to "A InvEqP B". This may be the same as the condition
3217 // tested in the max/min; if so, we can just return that.
3218 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
3220 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
3222 // Otherwise, see if "A InvEqP B" simplifies.
3224 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3228 case CmpInst::ICMP_UGE:
3230 return getTrue(ITy);
3231 case CmpInst::ICMP_ULT:
3233 return getFalse(ITy);
3237 // Variants on "max(x,y) >= min(x,z)".
3239 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3240 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3241 (A == C || A == D || B == C || B == D)) {
3242 // max(x, ?) pred min(x, ?).
3243 if (Pred == CmpInst::ICMP_SGE)
3245 return getTrue(ITy);
3246 if (Pred == CmpInst::ICMP_SLT)
3248 return getFalse(ITy);
3249 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3250 match(RHS, m_SMax(m_Value(C), m_Value(D))) &&
3251 (A == C || A == D || B == C || B == D)) {
3252 // min(x, ?) pred max(x, ?).
3253 if (Pred == CmpInst::ICMP_SLE)
3255 return getTrue(ITy);
3256 if (Pred == CmpInst::ICMP_SGT)
3258 return getFalse(ITy);
3259 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3260 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3261 (A == C || A == D || B == C || B == D)) {
3262 // max(x, ?) pred min(x, ?).
3263 if (Pred == CmpInst::ICMP_UGE)
3265 return getTrue(ITy);
3266 if (Pred == CmpInst::ICMP_ULT)
3268 return getFalse(ITy);
3269 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3270 match(RHS, m_UMax(m_Value(C), m_Value(D))) &&
3271 (A == C || A == D || B == C || B == D)) {
3272 // min(x, ?) pred max(x, ?).
3273 if (Pred == CmpInst::ICMP_ULE)
3275 return getTrue(ITy);
3276 if (Pred == CmpInst::ICMP_UGT)
3278 return getFalse(ITy);
3284 static Value *simplifyICmpWithDominatingAssume(CmpInst::Predicate Predicate,
3285 Value *LHS, Value *RHS,
3286 const SimplifyQuery &Q) {
3287 // Gracefully handle instructions that have not been inserted yet.
3288 if (!Q.AC || !Q.CxtI || !Q.CxtI->getParent())
3291 for (Value *AssumeBaseOp : {LHS, RHS}) {
3292 for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3296 CallInst *Assume = cast<CallInst>(AssumeVH);
3297 if (Optional<bool> Imp =
3298 isImpliedCondition(Assume->getArgOperand(0), Predicate, LHS, RHS,
3300 if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3301 return ConstantInt::get(GetCompareTy(LHS), *Imp);
3308 /// Given operands for an ICmpInst, see if we can fold the result.
3309 /// If not, this returns null.
3310 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3311 const SimplifyQuery &Q, unsigned MaxRecurse) {
3312 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3313 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3315 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3316 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3317 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3319 // If we have a constant, make sure it is on the RHS.
3320 std::swap(LHS, RHS);
3321 Pred = CmpInst::getSwappedPredicate(Pred);
3323 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3325 Type *ITy = GetCompareTy(LHS); // The return type.
3327 // For EQ and NE, we can always pick a value for the undef to make the
3328 // predicate pass or fail, so we can return undef.
3329 // Matches behavior in llvm::ConstantFoldCompareInstruction.
3330 if (isa<UndefValue>(RHS) && ICmpInst::isEquality(Pred))
3331 return UndefValue::get(ITy);
3333 // icmp X, X -> true/false
3334 // icmp X, undef -> true/false because undef could be X.
3335 if (LHS == RHS || isa<UndefValue>(RHS))
3336 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
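// For example, "icmp ule i32 %x, %x" folds to true, and
// "icmp ugt i32 %x, undef" folds to false.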
3338 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3341 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3344 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ))
3347 // If both operands have range metadata, use the metadata
3348 // to simplify the comparison.
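// For example, if %a carries !range [0, 8) and %b carries !range [8, 16),
// then "icmp ult i32 %a, %b" folds to true.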
3349 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3350 auto RHS_Instr = cast<Instruction>(RHS);
3351 auto LHS_Instr = cast<Instruction>(LHS);
3353 if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) &&
3354 Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) {
3355 auto RHS_CR = getConstantRangeFromMetadata(
3356 *RHS_Instr->getMetadata(LLVMContext::MD_range));
3357 auto LHS_CR = getConstantRangeFromMetadata(
3358 *LHS_Instr->getMetadata(LLVMContext::MD_range));
3360 auto Satisfied_CR = ConstantRange::makeSatisfyingICmpRegion(Pred, RHS_CR);
3361 if (Satisfied_CR.contains(LHS_CR))
3362 return ConstantInt::getTrue(RHS->getContext());
3364 auto InversedSatisfied_CR = ConstantRange::makeSatisfyingICmpRegion(
3365 CmpInst::getInversePredicate(Pred), RHS_CR);
3366 if (InversedSatisfied_CR.contains(LHS_CR))
3367 return ConstantInt::getFalse(RHS->getContext());
3371 // Compare of cast, for example (zext X) != 0 -> X != 0
3372 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3373 Instruction *LI = cast<CastInst>(LHS);
3374 Value *SrcOp = LI->getOperand(0);
3375 Type *SrcTy = SrcOp->getType();
3376 Type *DstTy = LI->getType();
3378 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3379 // if the integer type is the same size as the pointer type.
3380 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3381 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3382 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3383 // Transfer the cast to the constant.
3384 if (Value *V = SimplifyICmpInst(Pred, SrcOp,
3385 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3388 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3389 if (RI->getOperand(0)->getType() == SrcTy)
3390 // Compare without the cast.
3391 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3397 if (isa<ZExtInst>(LHS)) {
3398 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the same type.
3400 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3401 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3402 // Compare X and Y. Note that signed predicates become unsigned.
3403 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3404 SrcOp, RI->getOperand(0), Q,
3408 // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3409 else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3410 if (SrcOp == RI->getOperand(0)) {
3411 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3412 return ConstantInt::getTrue(ITy);
3413 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3414 return ConstantInt::getFalse(ITy);
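// For example, with i8 %x, "icmp ule i16 (zext %x), (sext %x)" is always true:
// the two extensions agree when %x is non-negative, and the zero-extended
// value is the smaller unsigned value otherwise.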
3417 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3418 // too. If not, then try to deduce the result of the comparison.
3419 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3420 // Compute the constant that would happen if we truncated to SrcTy then
3421 // reextended to DstTy.
3422 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3423 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3425 // If the re-extended constant didn't change then this is effectively
3426 // also a case of comparing two zero-extended values.
3427 if (RExt == CI && MaxRecurse)
3428 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3429 SrcOp, Trunc, Q, MaxRecurse-1))
3432 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3433 // there. Use this to work out the result of the comparison.
3436 default: llvm_unreachable("Unknown ICmp predicate!");
3438 case ICmpInst::ICMP_EQ:
3439 case ICmpInst::ICMP_UGT:
3440 case ICmpInst::ICMP_UGE:
3441 return ConstantInt::getFalse(CI->getContext());
3443 case ICmpInst::ICMP_NE:
3444 case ICmpInst::ICMP_ULT:
3445 case ICmpInst::ICMP_ULE:
3446 return ConstantInt::getTrue(CI->getContext());
3448 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3449 // is non-negative then LHS <s RHS.
3450 case ICmpInst::ICMP_SGT:
3451 case ICmpInst::ICMP_SGE:
3452 return CI->getValue().isNegative() ?
3453 ConstantInt::getTrue(CI->getContext()) :
3454 ConstantInt::getFalse(CI->getContext());
3456 case ICmpInst::ICMP_SLT:
3457 case ICmpInst::ICMP_SLE:
3458 return CI->getValue().isNegative() ?
3459 ConstantInt::getFalse(CI->getContext()) :
3460 ConstantInt::getTrue(CI->getContext());
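// For example, "icmp slt i16 (zext i8 %x to i16), 256" is always true: the
// zero-extended value is at most 255 and the constant 256 is non-negative.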
3466 if (isa<SExtInst>(LHS)) {
3467 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the same type.
3469 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3470 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3471 // Compare X and Y. Note that the predicate does not change.
3472 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3476 // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3477 else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3478 if (SrcOp == RI->getOperand(0)) {
3479 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3480 return ConstantInt::getTrue(ITy);
3481 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3482 return ConstantInt::getFalse(ITy);
3485 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3486 // too. If not, then try to deduce the result of the comparison.
3487 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3488 // Compute the constant that would happen if we truncated to SrcTy then
3489 // reextended to DstTy.
3490 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3491 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3493 // If the re-extended constant didn't change then this is effectively
3494 // also a case of comparing two sign-extended values.
3495 if (RExt == CI && MaxRecurse)
3496 if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
3499 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3500 // bits there. Use this to work out the result of the comparison.
3503 default: llvm_unreachable("Unknown ICmp predicate!");
3504 case ICmpInst::ICMP_EQ:
3505 return ConstantInt::getFalse(CI->getContext());
3506 case ICmpInst::ICMP_NE:
3507 return ConstantInt::getTrue(CI->getContext());
3509 // If RHS is non-negative then LHS <s RHS. If RHS is negative then LHS >s RHS.
3511 case ICmpInst::ICMP_SGT:
3512 case ICmpInst::ICMP_SGE:
3513 return CI->getValue().isNegative() ?
3514 ConstantInt::getTrue(CI->getContext()) :
3515 ConstantInt::getFalse(CI->getContext());
3516 case ICmpInst::ICMP_SLT:
3517 case ICmpInst::ICMP_SLE:
3518 return CI->getValue().isNegative() ?
3519 ConstantInt::getFalse(CI->getContext()) :
3520 ConstantInt::getTrue(CI->getContext());
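// For example, "icmp slt i16 (sext i8 %x to i16), 200" is always true: the
// sign-extended value is at most 127 and the constant 200 is non-negative.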
3522 // If LHS is non-negative then LHS <u RHS. If LHS is negative then LHS >u RHS.
3524 case ICmpInst::ICMP_UGT:
3525 case ICmpInst::ICMP_UGE:
3526 // Comparison is true iff the LHS <s 0.
3528 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3529 Constant::getNullValue(SrcTy),
3533 case ICmpInst::ICMP_ULT:
3534 case ICmpInst::ICMP_ULE:
3535 // Comparison is true iff the LHS >=s 0.
3537 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3538 Constant::getNullValue(SrcTy),
3548 // icmp eq|ne X, Y -> false|true if X != Y
3549 if (ICmpInst::isEquality(Pred) &&
3550 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
3551 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
3554 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3557 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3560 if (Value *V = simplifyICmpWithDominatingAssume(Pred, LHS, RHS, Q))
3563 // Simplify comparisons of related pointers using a powerful, recursive
3564 // GEP-walk when we have target data available.
3565 if (LHS->getType()->isPointerTy())
3566 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
3569 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3570 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3571 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
3572 Q.DL.getTypeSizeInBits(CLHS->getType()) &&
3573 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
3574 Q.DL.getTypeSizeInBits(CRHS->getType()))
3575 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
3576 Q.IIQ, CLHS->getPointerOperand(),
3577 CRHS->getPointerOperand()))
3580 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
3581 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
3582 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
3583 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
3584 (ICmpInst::isEquality(Pred) ||
3585 (GLHS->isInBounds() && GRHS->isInBounds() &&
3586 Pred == ICmpInst::getSignedPredicate(Pred)))) {
3587 // The bases are equal and the indices are constant. Build a constant
3588 // expression GEP with the same indices and a null base pointer to see
3589 // what constant folding can make out of it.
3590 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
3591 SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
3592 Constant *NewLHS = ConstantExpr::getGetElementPtr(
3593 GLHS->getSourceElementType(), Null, IndicesLHS);
3595 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
3596 Constant *NewRHS = ConstantExpr::getGetElementPtr(
3597 GLHS->getSourceElementType(), Null, IndicesRHS);
3598 Constant *NewICmp = ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
3599 return ConstantFoldConstant(NewICmp, Q.DL);
3604 // If the comparison is with the result of a select instruction, check whether
3605 // comparing with either branch of the select always yields the same value.
3606 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3607 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3610 // If the comparison is with the result of a phi instruction, check whether
3611 // doing the compare with each incoming phi value yields a common result.
3612 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3613 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3619 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3620 const SimplifyQuery &Q) {
3621 return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
3624 /// Given operands for an FCmpInst, see if we can fold the result.
3625 /// If not, this returns null.
3626 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3627 FastMathFlags FMF, const SimplifyQuery &Q,
3628 unsigned MaxRecurse) {
3629 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3630 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
3632 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3633 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3634 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3636 // If we have a constant, make sure it is on the RHS.
3637 std::swap(LHS, RHS);
3638 Pred = CmpInst::getSwappedPredicate(Pred);
3641 // Fold trivial predicates.
3642 Type *RetTy = GetCompareTy(LHS);
3643 if (Pred == FCmpInst::FCMP_FALSE)
3644 return getFalse(RetTy);
3645 if (Pred == FCmpInst::FCMP_TRUE)
3646 return getTrue(RetTy);
3648 // Fold (un)ordered comparison if we can determine there are no NaNs.
3649 if (Pred == FCmpInst::FCMP_UNO || Pred == FCmpInst::FCMP_ORD)
3651 (isKnownNeverNaN(LHS, Q.TLI) && isKnownNeverNaN(RHS, Q.TLI)))
3652 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
3654 // NaN is unordered; NaN is not ordered.
3655 assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) &&
3656 "Comparison must be either ordered or unordered");
3657 if (match(RHS, m_NaN()))
3658 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
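// For example, "fcmp olt double %x, 0x7FF8000000000000" (a NaN constant) folds
// to false, while the unordered "fcmp ult" form folds to true.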
3660 // fcmp pred x, undef and fcmp pred undef, x
3661 // fold to true if unordered, false if ordered
3662 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) {
3663 // Choosing NaN for the undef will always make unordered comparison succeed
3664 // and ordered comparison fail.
3665 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3668 // fcmp x,x -> true/false. Not all compares are foldable.
3670 if (CmpInst::isTrueWhenEqual(Pred))
3671 return getTrue(RetTy);
3672 if (CmpInst::isFalseWhenEqual(Pred))
3673 return getFalse(RetTy);
3676 // Handle fcmp with constant RHS.
3677 // TODO: Use match with a specific FP value, so these work with vectors with undef lanes.
3680 if (match(RHS, m_APFloat(C))) {
3681 // Check whether the constant is an infinity.
3682 if (C->isInfinity()) {
3683 if (C->isNegative()) {
3685 case FCmpInst::FCMP_OLT:
3686 // No value is ordered and less than negative infinity.
3687 return getFalse(RetTy);
3688 case FCmpInst::FCMP_UGE:
3689 // All values are unordered with or at least negative infinity.
3690 return getTrue(RetTy);
3696 case FCmpInst::FCMP_OGT:
3697 // No value is ordered and greater than infinity.
3698 return getFalse(RetTy);
3699 case FCmpInst::FCMP_ULE:
3700 // All values are unordered with and at most infinity.
3701 return getTrue(RetTy);
3707 if (C->isNegative() && !C->isNegZero()) {
3708 assert(!C->isNaN() && "Unexpected NaN constant!");
3709 // TODO: We can catch more cases by using a range check rather than
3710 // relying on CannotBeOrderedLessThanZero.
3712 case FCmpInst::FCMP_UGE:
3713 case FCmpInst::FCMP_UGT:
3714 case FCmpInst::FCMP_UNE:
3715 // (X >= 0) implies (X > C) when (C < 0)
3716 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3717 return getTrue(RetTy);
3719 case FCmpInst::FCMP_OEQ:
3720 case FCmpInst::FCMP_OLE:
3721 case FCmpInst::FCMP_OLT:
3722 // (X >= 0) implies !(X < C) when (C < 0)
3723 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3724 return getFalse(RetTy);
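// For example, when LHS is the result of @llvm.fabs and therefore cannot be
// ordered less than zero, "fcmp olt double %fabs, -1.0" folds to false and
// "fcmp uge double %fabs, -1.0" folds to true.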
3731 // Check comparison of [minnum/maxnum with constant] with another constant.
3733 if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) &&
3735 (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) &&
3738 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
3739 // The ordered relationship and minnum/maxnum guarantee that we do not
3740 // have NaN constants, so ordered/unordered preds are handled the same.
3742 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_UEQ:
3743 // minnum(X, LesserC) == C --> false
3744 // maxnum(X, GreaterC) == C --> false
3745 return getFalse(RetTy);
3746 case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_UNE:
3747 // minnum(X, LesserC) != C --> true
3748 // maxnum(X, GreaterC) != C --> true
3749 return getTrue(RetTy);
3750 case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_UGE:
3751 case FCmpInst::FCMP_OGT: case FCmpInst::FCMP_UGT:
3752 // minnum(X, LesserC) >= C --> false
3753 // minnum(X, LesserC) > C --> false
3754 // maxnum(X, GreaterC) >= C --> true
3755 // maxnum(X, GreaterC) > C --> true
3756 return ConstantInt::get(RetTy, IsMaxNum);
3757 case FCmpInst::FCMP_OLE: case FCmpInst::FCMP_ULE:
3758 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_ULT:
3759 // minnum(X, LesserC) <= C --> true
3760 // minnum(X, LesserC) < C --> true
3761 // maxnum(X, GreaterC) <= C --> false
3762 // maxnum(X, GreaterC) < C --> false
3763 return ConstantInt::get(RetTy, !IsMaxNum);
3765 // TRUE/FALSE/ORD/UNO should be handled before this.
3766 llvm_unreachable("Unexpected fcmp predicate");
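// Concrete example: "fcmp ogt float (call @llvm.minnum.f32(%x, 1.0)), 2.0" is
// always false because the minnum result can never exceed 1.0.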
3771 if (match(RHS, m_AnyZeroFP())) {
3773 case FCmpInst::FCMP_OGE:
3774 case FCmpInst::FCMP_ULT:
3775 // Positive or zero X >= 0.0 --> true
3776 // Positive or zero X < 0.0 --> false
3777 if ((FMF.noNaNs() || isKnownNeverNaN(LHS, Q.TLI)) &&
3778 CannotBeOrderedLessThanZero(LHS, Q.TLI))
3779 return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy);
3781 case FCmpInst::FCMP_UGE:
3782 case FCmpInst::FCMP_OLT:
3783 // Positive or zero or nan X >= 0.0 --> true
3784 // Positive or zero or nan X < 0.0 --> false
3785 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3786 return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy);
3793 // If the comparison is with the result of a select instruction, check whether
3794 // comparing with either branch of the select always yields the same value.
3795 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3796 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3799 // If the comparison is with the result of a phi instruction, check whether
3800 // doing the compare with each incoming phi value yields a common result.
3801 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3802 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3808 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3809 FastMathFlags FMF, const SimplifyQuery &Q) {
3810 return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
3813 static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
3814 const SimplifyQuery &Q,
3815 bool AllowRefinement,
3816 unsigned MaxRecurse) {
3817 // Trivial replacement.
3821 // We cannot replace a constant, and shouldn't even try.
3822 if (isa<Constant>(Op))
3825 auto *I = dyn_cast<Instruction>(V);
3830 // %cmp = icmp eq i32 %x, 2147483647
3831 // %add = add nsw i32 %x, 1
3832 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
3834 // We can't replace %sel with %add unless we strip away the flags (which will
3835 // be done in InstCombine).
3836 // TODO: This is unsound, because it only catches some forms of refinement.
3837 if (!AllowRefinement && canCreatePoison(I))
3840 // If this is a binary operator, try to simplify it with the replaced op.
3841 if (auto *B = dyn_cast<BinaryOperator>(I)) {
3843 if (B->getOperand(0) == Op)
3844 return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q,
3846 if (B->getOperand(1) == Op)
3847 return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q,
3852 // Same for CmpInsts.
3853 if (CmpInst *C = dyn_cast<CmpInst>(I)) {
3855 if (C->getOperand(0) == Op)
3856 return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q,
3858 if (C->getOperand(1) == Op)
3859 return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q,
3865 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3867 SmallVector<Value *, 8> NewOps(GEP->getNumOperands());
3868 transform(GEP->operands(), NewOps.begin(),
3869 [&](Value *V) { return V == Op ? RepOp : V; });
3870 return SimplifyGEPInst(GEP->getSourceElementType(), NewOps, Q,
3875 // TODO: We could hand off more cases to instsimplify here.
3877 // If all operands are constant after substituting Op for RepOp then we can
3878 // constant fold the instruction.
3879 if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) {
3880 // Build a list of all constant operands.
3881 SmallVector<Constant *, 8> ConstOps;
3882 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3883 if (I->getOperand(i) == Op)
3884 ConstOps.push_back(CRepOp);
3885 else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i)))
3886 ConstOps.push_back(COp);
3891 // All operands were constants, fold it.
3892 if (ConstOps.size() == I->getNumOperands()) {
3893 if (CmpInst *C = dyn_cast<CmpInst>(I))
3894 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
3895 ConstOps[1], Q.DL, Q.TLI);
3897 if (LoadInst *LI = dyn_cast<LoadInst>(I))
3898 if (!LI->isVolatile())
3899 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL);
3901 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
3908 Value *llvm::SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
3909 const SimplifyQuery &Q,
3910 bool AllowRefinement) {
3911 return ::SimplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement,
3915 /// Try to simplify a select instruction when its condition operand is an
3916 /// integer comparison where one operand of the compare is a constant.
3917 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
3918 const APInt *Y, bool TrueWhenUnset) {
3921 // (X & Y) == 0 ? X & ~Y : X --> X
3922 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
3923 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
3925 return TrueWhenUnset ? FalseVal : TrueVal;
3927 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
3928 // (X & Y) != 0 ? X : X & ~Y --> X
3929 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
3931 return TrueWhenUnset ? FalseVal : TrueVal;
3933 if (Y->isPowerOf2()) {
3934 // (X & Y) == 0 ? X | Y : X --> X | Y
3935 // (X & Y) != 0 ? X | Y : X --> X
3936 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
3938 return TrueWhenUnset ? TrueVal : FalseVal;
3940 // (X & Y) == 0 ? X : X | Y --> X
3941 // (X & Y) != 0 ? X : X | Y --> X | Y
3942 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
3944 return TrueWhenUnset ? TrueVal : FalseVal;
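// Concrete example with Y == 4: "(X & 4) == 0 ? (X | 4) : X" simplifies to
// X | 4, since the select result always has bit 2 set.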
3950 /// An alternative way to test if a bit is set or not uses sgt/slt instead of eq/ne.
3952 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS,
3953 ICmpInst::Predicate Pred,
3954 Value *TrueVal, Value *FalseVal) {
3957 if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask))
3960 return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask,
3961 Pred == ICmpInst::ICMP_EQ);
3964 /// Try to simplify a select instruction when its condition operand is an
3965 /// integer comparison.
3966 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
3967 Value *FalseVal, const SimplifyQuery &Q,
3968 unsigned MaxRecurse) {
3969 ICmpInst::Predicate Pred;
3970 Value *CmpLHS, *CmpRHS;
3971 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
3974 // Canonicalize ne to eq predicate.
3975 if (Pred == ICmpInst::ICMP_NE) {
3976 Pred = ICmpInst::ICMP_EQ;
3977 std::swap(TrueVal, FalseVal);
3980 if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
3983 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
3984 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
3985 /*TrueWhenUnset=*/true))
3988 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
3990 auto isFsh = m_CombineOr(m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(),
3992 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X),
3994 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
3995 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
3996 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
3999 // Test for a zero-shift-guard-op around rotates. These are used to
4000 // avoid UB from oversized shifts in raw IR rotate patterns, but the
4001 // intrinsics do not have that problem.
4002 // We do not allow this transform for the general funnel shift case because
4003 // that would not preserve the poison safety of the original code.
4004 auto isRotate = m_CombineOr(m_Intrinsic<Intrinsic::fshl>(m_Value(X),
4007 m_Intrinsic<Intrinsic::fshr>(m_Value(X),
4010 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4011 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4012 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4013 Pred == ICmpInst::ICMP_EQ)
4017 // Check for other compares that behave like bit test.
4018 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred,
4022 // If we have an equality comparison, then we know the value in one of the
4023 // arms of the select. See if substituting this value into the arm and
4024 // simplifying the result yields the same value as the other arm.
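// For example, "select (icmp eq i32 %x, 0), i32 0, i32 %x" folds to %x:
// substituting 0 for %x in the false arm gives 0, the same as the true arm.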
4025 if (Pred == ICmpInst::ICMP_EQ) {
4026 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q,
4027 /* AllowRefinement */ false, MaxRecurse) ==
4029 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q,
4030 /* AllowRefinement */ false, MaxRecurse) ==
4033 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q,
4034 /* AllowRefinement */ true, MaxRecurse) ==
4036 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q,
4037 /* AllowRefinement */ true, MaxRecurse) ==
4045 /// Try to simplify a select instruction when its condition operand is a
4046 /// floating-point comparison.
4047 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F,
4048 const SimplifyQuery &Q) {
4049 FCmpInst::Predicate Pred;
4050 if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) &&
4051 !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T))))
4054 // This transform is safe if we do not have (do not care about) -0.0 or if
4055 // at least one operand is known to not be -0.0. Otherwise, the select can
4056 // change the sign of a zero operand.
4057 bool HasNoSignedZeros = Q.CxtI && isa<FPMathOperator>(Q.CxtI) &&
4058 Q.CxtI->hasNoSignedZeros();
4060 if (HasNoSignedZeros || (match(T, m_APFloat(C)) && C->isNonZero()) ||
4061 (match(F, m_APFloat(C)) && C->isNonZero())) {
4062 // (T == F) ? T : F --> F
4063 // (F == T) ? T : F --> F
4064 if (Pred == FCmpInst::FCMP_OEQ)
4067 // (T != F) ? T : F --> T
4068 // (F != T) ? T : F --> T
4069 if (Pred == FCmpInst::FCMP_UNE)
4076 /// Given operands for a SelectInst, see if we can fold the result.
4077 /// If not, this returns null.
4078 static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4079 const SimplifyQuery &Q, unsigned MaxRecurse) {
4080 if (auto *CondC = dyn_cast<Constant>(Cond)) {
4081 if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4082 if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4083 return ConstantFoldSelectInstruction(CondC, TrueC, FalseC);
4085 // select undef, X, Y -> X or Y
4086 if (isa<UndefValue>(CondC))
4087 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4089 // TODO: Vector constants with undef elements don't simplify.
4091 // select true, X, Y -> X
4092 if (CondC->isAllOnesValue())
4094 // select false, X, Y -> Y
4095 if (CondC->isNullValue())
4099 // select i1 Cond, i1 true, i1 false --> i1 Cond
4100 assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4101 "Select must have bool or bool vector condition");
4102 assert(TrueVal->getType() == FalseVal->getType() &&
4103 "Select must have same types for true/false ops");
4104 if (Cond->getType() == TrueVal->getType() &&
4105 match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
4108 // select ?, X, X -> X
4109 if (TrueVal == FalseVal)
4112 if (isa<UndefValue>(TrueVal)) // select ?, undef, X -> X
4114 if (isa<UndefValue>(FalseVal)) // select ?, X, undef -> X
4117 // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
4118 Constant *TrueC, *FalseC;
4119 if (TrueVal->getType()->isVectorTy() && match(TrueVal, m_Constant(TrueC)) &&
4120 match(FalseVal, m_Constant(FalseC))) {
4121 unsigned NumElts = cast<VectorType>(TrueC->getType())->getNumElements();
4122 SmallVector<Constant *, 16> NewC;
4123 for (unsigned i = 0; i != NumElts; ++i) {
4124 // Bail out on incomplete vector constants.
4125 Constant *TEltC = TrueC->getAggregateElement(i);
4126 Constant *FEltC = FalseC->getAggregateElement(i);
4127 if (!TEltC || !FEltC)
4130 // If the elements match (undef or not), that value is the result. If only
4131 // one element is undef, choose the defined element as the safe result.
4133 NewC.push_back(TEltC);
4134 else if (isa<UndefValue>(TEltC))
4135 NewC.push_back(FEltC);
4136 else if (isa<UndefValue>(FEltC))
4137 NewC.push_back(TEltC);
4141 if (NewC.size() == NumElts)
4142 return ConstantVector::get(NewC);
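// For example, "select i1 %c, <2 x i32> <i32 1, i32 undef>, <2 x i32> <i32 1, i32 7>"
// becomes the constant <2 x i32> <i32 1, i32 7>.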
4146 simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
4149 if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q))
4152 if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal))
4155 Optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
4157 return *Imp ? TrueVal : FalseVal;
4162 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4163 const SimplifyQuery &Q) {
4164 return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
4167 /// Given operands for a GetElementPtrInst, see if we can fold the result.
4168 /// If not, this returns null.
4169 static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
4170 const SimplifyQuery &Q, unsigned) {
4171 // The type of the GEP pointer operand.
4173 cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
4175 // getelementptr P -> P.
4176 if (Ops.size() == 1)
4179 // Compute the (pointer) type returned by the GEP instruction.
4180 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
4181 Type *GEPTy = PointerType::get(LastType, AS);
4182 if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
4183 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4184 else if (VectorType *VT = dyn_cast<VectorType>(Ops[1]->getType()))
4185 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4187 if (isa<UndefValue>(Ops[0]))
4188 return UndefValue::get(GEPTy);
4190 bool IsScalableVec = isa<ScalableVectorType>(SrcTy);
4192 if (Ops.size() == 2) {
4193 // getelementptr P, 0 -> P.
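// For example, "getelementptr i8, i8* %p, i64 0" is just %p.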
4194 if (match(Ops[1], m_Zero()) && Ops[0]->getType() == GEPTy)
4198 if (!IsScalableVec && Ty->isSized()) {
4201 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
4202 // getelementptr P, N -> P if P points to a type of zero size.
4203 if (TyAllocSize == 0 && Ops[0]->getType() == GEPTy)
4206 // The following transforms are only safe if the ptrtoint cast
4207 // doesn't truncate the pointers.
4208 if (Ops[1]->getType()->getScalarSizeInBits() ==
4209 Q.DL.getPointerSizeInBits(AS)) {
4210 auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
4211 if (match(P, m_Zero()))
4212 return Constant::getNullValue(GEPTy);
4214 if (match(P, m_PtrToInt(m_Value(Temp))))
4215 if (Temp->getType() == GEPTy)
4220 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
4221 if (TyAllocSize == 1 &&
4222 match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0])))))
4223 if (Value *R = PtrToIntOrZero(P))
4226 // getelementptr V, (ashr (sub P, V), C) -> Q
4227 // if P points to a type of size 1 << C.
4229 m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
4230 m_ConstantInt(C))) &&
4231 TyAllocSize == 1ULL << C)
4232 if (Value *R = PtrToIntOrZero(P))
4235 // getelementptr V, (sdiv (sub P, V), C) -> Q
4236 // if P points to a type of size C.
4238 m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
4239 m_SpecificInt(TyAllocSize))))
4240 if (Value *R = PtrToIntOrZero(P))
4246 if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
4247 all_of(Ops.slice(1).drop_back(1),
4248 [](Value *Idx) { return match(Idx, m_Zero()); })) {
4250 Q.DL.getIndexSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
4251 if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == IdxWidth) {
4252 APInt BasePtrOffset(IdxWidth, 0);
4253 Value *StrippedBasePtr =
4254 Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
4257 // gep (gep V, C), (sub 0, V) -> C
4258 if (match(Ops.back(),
4259 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr))))) {
4260 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
4261 return ConstantExpr::getIntToPtr(CI, GEPTy);
4263 // gep (gep V, C), (xor V, -1) -> C-1
4264 if (match(Ops.back(),
4265 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes()))) {
4266 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
4267 return ConstantExpr::getIntToPtr(CI, GEPTy);
4272 // Check to see if this is constant foldable.
4273 if (!all_of(Ops, [](Value *V) { return isa<Constant>(V); }))
4276 auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
4278 return ConstantFoldConstant(CE, Q.DL);
4281 Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
4282 const SimplifyQuery &Q) {
4283 return ::SimplifyGEPInst(SrcTy, Ops, Q, RecursionLimit);
4286 /// Given operands for an InsertValueInst, see if we can fold the result.
4287 /// If not, this returns null.
4288 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
4289 ArrayRef<unsigned> Idxs, const SimplifyQuery &Q,
4291 if (Constant *CAgg = dyn_cast<Constant>(Agg))
4292 if (Constant *CVal = dyn_cast<Constant>(Val))
4293 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
4295 // insertvalue x, undef, n -> x
4296 if (match(Val, m_Undef()))
4299 // insertvalue x, (extractvalue y, n), n
4300 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
4301 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
4302 EV->getIndices() == Idxs) {
4303 // insertvalue undef, (extractvalue y, n), n -> y
4304 if (match(Agg, m_Undef()))
4305 return EV->getAggregateOperand();
4307 // insertvalue y, (extractvalue y, n), n -> y
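// For example, "insertvalue %agg, (extractvalue %agg, 1), 1" is just %agg.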
4308 if (Agg == EV->getAggregateOperand())
4315 Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
4316 ArrayRef<unsigned> Idxs,
4317 const SimplifyQuery &Q) {
4318 return ::SimplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
4321 Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
4322 const SimplifyQuery &Q) {
4323 // Try to constant fold.
4324 auto *VecC = dyn_cast<Constant>(Vec);
4325 auto *ValC = dyn_cast<Constant>(Val);
4326 auto *IdxC = dyn_cast<Constant>(Idx);
4327 if (VecC && ValC && IdxC)
4328 return ConstantFoldInsertElementInstruction(VecC, ValC, IdxC);
4330 // For fixed-length vector, fold into undef if index is out of bounds.
4331 if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
4332 if (isa<FixedVectorType>(Vec->getType()) &&
4333 CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
4334 return UndefValue::get(Vec->getType());
4337 // If index is undef, it might be out of bounds (see above case)
4338 if (isa<UndefValue>(Idx))
4339 return UndefValue::get(Vec->getType());
4341 // If the scalar is undef, and there is no risk of propagating poison from the
4342 // vector value, simplify to the vector value.
4343 if (isa<UndefValue>(Val) && isGuaranteedNotToBeUndefOrPoison(Vec))
4346 // If we are extracting a value from a vector, then inserting it into the same
4347 // place, that's the input vector:
4348 // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
4349 if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
4355 /// Given operands for an ExtractValueInst, see if we can fold the result.
4356 /// If not, this returns null.
4357 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
4358 const SimplifyQuery &, unsigned) {
4359 if (auto *CAgg = dyn_cast<Constant>(Agg))
4360 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
4362 // extractvalue x, (insertvalue y, elt, n), n -> elt
4363 unsigned NumIdxs = Idxs.size();
4364 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
4365 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
4366 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
4367 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
4368 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
4369 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
4370 Idxs.slice(0, NumCommonIdxs)) {
4371 if (NumIdxs == NumInsertValueIdxs)
4372 return IVI->getInsertedValueOperand();
4380 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
4381 const SimplifyQuery &Q) {
4382 return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
4385 /// Given operands for an ExtractElementInst, see if we can fold the result.
4386 /// If not, this returns null.
4387 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &,
4389 auto *VecVTy = cast<VectorType>(Vec->getType());
4390 if (auto *CVec = dyn_cast<Constant>(Vec)) {
4391 if (auto *CIdx = dyn_cast<Constant>(Idx))
4392 return ConstantFoldExtractElementInstruction(CVec, CIdx);
4394 // The index is not relevant if our vector is a splat.
4395 if (auto *Splat = CVec->getSplatValue())
4398 if (isa<UndefValue>(Vec))
4399 return UndefValue::get(VecVTy->getElementType());
4402 // If extracting a specified index from the vector, see if we can recursively
4403 // find a previously computed scalar that was inserted into the vector.
4404 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
4405 // For fixed-length vector, fold into undef if index is out of bounds.
4406 if (isa<FixedVectorType>(VecVTy) &&
4407 IdxC->getValue().uge(VecVTy->getNumElements()))
4408 return UndefValue::get(VecVTy->getElementType());
4409 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
4413 // An undef extract index can be arbitrarily chosen to be an out-of-range
4414 // index value, which would result in the instruction being undef.
4415 if (isa<UndefValue>(Idx))
4416 return UndefValue::get(VecVTy->getElementType());
4421 Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx,
4422 const SimplifyQuery &Q) {
4423 return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
4426 /// See if we can fold the given phi. If not, returns null.
4427 static Value *SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q) {
4428 // If all of the PHI's incoming values are the same then replace the PHI node
4429 // with the common value.
4430 Value *CommonValue = nullptr;
4431 bool HasUndefInput = false;
4432 for (Value *Incoming : PN->incoming_values()) {
4433 // If the incoming value is the phi node itself, it can safely be skipped.
4434 if (Incoming == PN) continue;
4435 if (isa<UndefValue>(Incoming)) {
4436 // Remember that we saw an undef value, but otherwise ignore them.
4437 HasUndefInput = true;
4440 if (CommonValue && Incoming != CommonValue)
4441 return nullptr; // Not the same, bail out.
4442 CommonValue = Incoming;
4445 // If CommonValue is null then all of the incoming values were either undef or
4446 // equal to the phi node itself.
4448 return UndefValue::get(PN->getType());
4450 // If we have a PHI node like phi(X, undef, X), where X is defined by some
4451 // instruction, we cannot return X as the result of the PHI node unless it
4452 // dominates the PHI block.
4454 return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
4459 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op,
4460 Type *Ty, const SimplifyQuery &Q, unsigned MaxRecurse) {
4461 if (auto *C = dyn_cast<Constant>(Op))
4462 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
4464 if (auto *CI = dyn_cast<CastInst>(Op)) {
4465 auto *Src = CI->getOperand(0);
4466 Type *SrcTy = Src->getType();
4467 Type *MidTy = CI->getType();
4469 if (Src->getType() == Ty) {
4470 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
4471 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
4473 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
4475 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
4477 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
4478 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
4479 SrcIntPtrTy, MidIntPtrTy,
4480 DstIntPtrTy) == Instruction::BitCast)
4486 if (CastOpc == Instruction::BitCast)
4487 if (Op->getType() == Ty)
4493 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
4494 const SimplifyQuery &Q) {
4495 return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
4498 /// For the given destination element of a shuffle, peek through shuffles to
4499 /// match a root vector source operand that contains that element in the same
4500 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
4501 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
4502 int MaskVal, Value *RootVec,
4503 unsigned MaxRecurse) {
4507 // Bail out if any mask value is undefined. That kind of shuffle may be
4508 // simplified further based on demanded bits or other folds.
4512 // The mask value chooses which source operand we need to look at next.
4513 int InVecNumElts = cast<VectorType>(Op0->getType())->getNumElements();
4514 int RootElt = MaskVal;
4515 Value *SourceOp = Op0;
4516 if (MaskVal >= InVecNumElts) {
4517 RootElt = MaskVal - InVecNumElts;
4521 // If the source operand is a shuffle itself, look through it to find the
4522 // matching root vector.
4523 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
4524 return foldIdentityShuffles(
4525 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
4526 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
4529 // TODO: Look through bitcasts? What if the bitcast changes the vector element size?
4532 // The source operand is not a shuffle. Initialize the root vector value for
4533 // this shuffle if that has not been done yet.
4537 // Give up as soon as a source operand does not match the existing root value.
4538 if (RootVec != SourceOp)
4541 // The element must be coming from the same lane in the source vector
4542 // (although it may have crossed lanes in intermediate shuffles).
4543 if (RootElt != DestElt)
4549 static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
4550 ArrayRef<int> Mask, Type *RetTy,
4551 const SimplifyQuery &Q,
4552 unsigned MaxRecurse) {
4553 if (all_of(Mask, [](int Elem) { return Elem == UndefMaskElem; }))
4554 return UndefValue::get(RetTy);
4556 auto *InVecTy = cast<VectorType>(Op0->getType());
4557 unsigned MaskNumElts = Mask.size();
4558 ElementCount InVecEltCount = InVecTy->getElementCount();
4560 bool Scalable = InVecEltCount.Scalable;
4562 SmallVector<int, 32> Indices;
4563 Indices.assign(Mask.begin(), Mask.end());
4565 // Canonicalization: If mask does not select elements from an input vector,
4566 // replace that input vector with undef.
4568 bool MaskSelects0 = false, MaskSelects1 = false;
4569 unsigned InVecNumElts = InVecEltCount.Min;
4570 for (unsigned i = 0; i != MaskNumElts; ++i) {
4571 if (Indices[i] == -1)
4573 if ((unsigned)Indices[i] < InVecNumElts)
4574 MaskSelects0 = true;
4576 MaskSelects1 = true;
4579 Op0 = UndefValue::get(InVecTy);
4581 Op1 = UndefValue::get(InVecTy);
4584 auto *Op0Const = dyn_cast<Constant>(Op0);
4585 auto *Op1Const = dyn_cast<Constant>(Op1);
4587 // If all operands are constant, constant fold the shuffle. This
4588 // transformation depends on the value of the mask which is not known at
4589 // compile time for scalable vectors
4590 if (!Scalable && Op0Const && Op1Const)
4591 return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
4593 // Canonicalization: if only one input vector is constant, it shall be the
4594 // second one. This transformation depends on the value of the mask which
4595 // is not known at compile time for scalable vectors
4596 if (!Scalable && Op0Const && !Op1Const) {
4597 std::swap(Op0, Op1);
4598 ShuffleVectorInst::commuteShuffleMask(Indices, InVecEltCount.Min);
4601 // A splat of an inserted scalar constant becomes a vector constant:
4602 // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
4603 // NOTE: We may have commuted above, so analyze the updated Indices, not the
4604 // original mask constant.
4605 // NOTE: This transformation depends on the value of the mask which is not
4606 // known at compile time for scalable vectors
4608 ConstantInt *IndexC;
4609 if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
4610 m_ConstantInt(IndexC)))) {
4611 // Match a splat shuffle mask of the insert index allowing undef elements.
4612 int InsertIndex = IndexC->getZExtValue();
4613 if (all_of(Indices, [InsertIndex](int MaskElt) {
4614 return MaskElt == InsertIndex || MaskElt == -1;
4616 assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
4618 // Shuffle mask undefs become undefined constant result elements.
4619 SmallVector<Constant *, 16> VecC(MaskNumElts, C);
4620 for (unsigned i = 0; i != MaskNumElts; ++i)
4621 if (Indices[i] == -1)
4622 VecC[i] = UndefValue::get(C->getType());
4623 return ConstantVector::get(VecC);
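// Concrete example: "shufflevector (insertelement undef, i32 7, i32 0), undef,
// zeroinitializer" becomes the splat constant <i32 7, i32 7, ...>.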
4627 // A shuffle of a splat is always the splat itself. Legal if the shuffle's
4628 // value type is the same as the input vectors' type.
4629 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
4630 if (isa<UndefValue>(Op1) && RetTy == InVecTy &&
4631 is_splat(OpShuf->getShuffleMask()))
4634 // All remaining transformations depend on the value of the mask, which is
4635 // not known at compile time for scalable vectors.
4639 // Don't fold a shuffle with undef mask elements. This may get folded in a
4640 // better way using demanded bits or other analysis.
4641 // TODO: Should we allow this?
4642 if (find(Indices, -1) != Indices.end())
4645 // Check if every element of this shuffle can be mapped back to the
4646 // corresponding element of a single root vector. If so, we don't need this
4647 // shuffle. This handles simple identity shuffles as well as chains of
4648 // shuffles that may widen/narrow and/or move elements across lanes and back.
4649 Value *RootVec = nullptr;
4650 for (unsigned i = 0; i != MaskNumElts; ++i) {
4651 // Note that recursion is limited for each vector element, so if any element
4652 // exceeds the limit, this will fail to simplify.
4654 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
4656 // We can't replace a widening/narrowing shuffle with one of its operands.
4657 if (!RootVec || RootVec->getType() != RetTy)
4663 /// Given operands for a ShuffleVectorInst, fold the result or return null.
4664 Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
4665 ArrayRef<int> Mask, Type *RetTy,
4666 const SimplifyQuery &Q) {
4667 return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
4670 static Constant *foldConstant(Instruction::UnaryOps Opcode,
4671 Value *&Op, const SimplifyQuery &Q) {
4672 if (auto *C = dyn_cast<Constant>(Op))
4673 return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
4677 /// Given the operand for an FNeg, see if we can fold the result. If not, this returns null.
4679 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
4680 const SimplifyQuery &Q, unsigned MaxRecurse) {
4681 if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
4685 // fneg (fneg X) ==> X
4686 if (match(Op, m_FNeg(m_Value(X))))
4692 Value *llvm::SimplifyFNegInst(Value *Op, FastMathFlags FMF,
4693 const SimplifyQuery &Q) {
4694 return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
4697 static Constant *propagateNaN(Constant *In) {
4698 // If the input is a vector with undef elements, just return a default NaN.
4700 return ConstantFP::getNaN(In->getType());
4702 // Propagate the existing NaN constant when possible.
4703 // TODO: Should we quiet a signaling NaN?
4707 /// Perform folds that are common to any floating-point operation. This implies
4708 /// transforms based on undef/NaN because the operation itself makes no
4709 /// difference to the result.
4710 static Constant *simplifyFPOp(ArrayRef<Value *> Ops,
4711 FastMathFlags FMF = FastMathFlags()) {
4712 for (Value *V : Ops) {
4713 bool IsNan = match(V, m_NaN());
4714 bool IsInf = match(V, m_Inf());
4715 bool IsUndef = match(V, m_Undef());
4717 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
4718 // (an undef operand can be chosen to be Nan/Inf), then the result of
4719 // this operation is poison. That result can be relaxed to undef.
4720 if (FMF.noNaNs() && (IsNan || IsUndef))
4721 return UndefValue::get(V->getType());
4722 if (FMF.noInfs() && (IsInf || IsUndef))
4723 return UndefValue::get(V->getType());
4725 if (IsUndef || IsNan)
4726 return propagateNaN(cast<Constant>(V));
4731 /// Given operands for an FAdd, see if we can fold the result. If not, this returns null.
4733 static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4734 const SimplifyQuery &Q, unsigned MaxRecurse) {
4735 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
4738 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF))
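// fadd X, -0 ==> X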
4742 if (match(Op1, m_NegZeroFP()))
4745 // fadd X, 0 ==> X, when we know X is not -0
4746 if (match(Op1, m_PosZeroFP()) &&
4747 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
4750 // With nnan: -X + X --> 0.0 (and commuted variant)
4751 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
4752 // Negative zeros are allowed because we always end up with positive zero:
4753 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
4754 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
4755 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
4756 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
4758 if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
4759 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
4760 return ConstantFP::getNullValue(Op0->getType());
4762 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
4763 match(Op1, m_FNeg(m_Specific(Op0))))
4764 return ConstantFP::getNullValue(Op0->getType());
4767 // (X - Y) + Y --> X
4768 // Y + (X - Y) --> X
4770 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
4771 (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
4772 match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
4778 /// Given operands for an FSub, see if we can fold the result. If not, this returns null.
4780 static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4781 const SimplifyQuery &Q, unsigned MaxRecurse) {
4782 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
4785 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF))
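// fsub X, +0 ==> X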
4789 if (match(Op1, m_PosZeroFP()))
4792 // fsub X, -0 ==> X, when we know X is not -0
4793 if (match(Op1, m_NegZeroFP()) &&
4794 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
4797 // fsub -0.0, (fsub -0.0, X) ==> X
4798 // fsub -0.0, (fneg X) ==> X
4800 if (match(Op0, m_NegZeroFP()) &&
4801 match(Op1, m_FNeg(m_Value(X))))
4804 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
4805 // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
4806 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
4807 (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
4808 match(Op1, m_FNeg(m_Value(X)))))
4811 // fsub nnan x, x ==> 0.0
4812 if (FMF.noNaNs() && Op0 == Op1)
4813 return Constant::getNullValue(Op0->getType());
4815 // Y - (Y - X) --> X
4816 // (X + Y) - Y --> X
4817 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
4818 (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
4819 match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
4825 static Value *SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
4826 const SimplifyQuery &Q, unsigned MaxRecurse) {
4827 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF))
4830 // fmul X, 1.0 ==> X
4831 if (match(Op1, m_FPOne()))
4834 // fmul 1.0, X ==> X
4835 if (match(Op0, m_FPOne()))
4838 // fmul nnan nsz X, 0 ==> 0
4839 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZeroFP()))
4840 return ConstantFP::getNullValue(Op0->getType());
4842 // fmul nnan nsz 0, X ==> 0
4843 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
4844 return ConstantFP::getNullValue(Op1->getType());
4846 // sqrt(X) * sqrt(X) --> X, if we can:
4847 // 1. Remove the intermediate rounding (reassociate).
4848 // 2. Ignore non-zero negative numbers because sqrt would produce NAN.
4849 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
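// For example, with "reassoc nnan nsz", "fmul (call @llvm.sqrt.f64(%x)),
// (call @llvm.sqrt.f64(%x))" simplifies to %x.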
4851 if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) &&
4852 FMF.allowReassoc() && FMF.noNaNs() && FMF.noSignedZeros())
4858 /// Given the operands for an FMul, see if we can fold the result
4859 static Value *SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4860 const SimplifyQuery &Q, unsigned MaxRecurse) {
4861 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
4864 // Now apply simplifications that do not require rounding.
4865 return SimplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse);
4868 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4869 const SimplifyQuery &Q) {
4870 return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit);
4874 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4875 const SimplifyQuery &Q) {
4876 return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit);
4879 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4880 const SimplifyQuery &Q) {
4881 return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit);
4884 Value *llvm::SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
4885 const SimplifyQuery &Q) {
4886 return ::SimplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit);
4889 static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4890 const SimplifyQuery &Q, unsigned) {
4891 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
4894 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF))
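// X / 1.0 -> X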
4898 if (match(Op1, m_FPOne()))
4902 // 0 / X -> 0. Requires that NaNs are off (X could be zero) and signed zeros
4903 // are ignored (X could be positive or negative, so the output sign is unknown).
4904 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
4905 return ConstantFP::getNullValue(Op0->getType());
4908 // X / X -> 1.0 is legal when NaNs are ignored.
4909 // We can ignore infinities because INF/INF is NaN.
4911 return ConstantFP::get(Op0->getType(), 1.0);
4913 // (X * Y) / Y --> X if we can reassociate to the above form.
4915 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
4918 // -X / X -> -1.0 and
4919 // X / -X -> -1.0 are legal when NaNs are ignored.
4920 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
4921 if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
4922 match(Op1, m_FNegNSZ(m_Specific(Op0))))
4923 return ConstantFP::get(Op0->getType(), -1.0);
4929 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4930 const SimplifyQuery &Q) {
4931 return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit);
4934 static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4935 const SimplifyQuery &Q, unsigned) {
4936 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
4939 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF))
4942 // Unlike fdiv, the result of frem always matches the sign of the dividend.
4943 // The constant match may include undef elements in a vector, so return a full
4944 // zero constant as the result.
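// For example, assuming NaNs can be ignored (nnan), "frem +0.0, %x" is +0.0
// and "frem -0.0, %x" is -0.0.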
4947 if (match(Op0, m_PosZeroFP()))
4948 return ConstantFP::getNullValue(Op0->getType());
4950 if (match(Op0, m_NegZeroFP()))
4951 return ConstantFP::getNegativeZero(Op0->getType());
4957 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4958 const SimplifyQuery &Q) {
4959 return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit);
4962 //=== Helper functions for higher up the class hierarchy.
4964 /// Given the operand for a UnaryOperator, see if we can fold the result.
4965 /// If not, this returns null.
4966 static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
4967 unsigned MaxRecurse) {
4969 case Instruction::FNeg:
4970 return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
4972 llvm_unreachable("Unexpected opcode");
4976 /// Given the operand for a UnaryOperator, see if we can fold the result.
4977 /// If not, this returns null.
4978 /// Try to use FastMathFlags when folding the result.
4979 static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
4980 const FastMathFlags &FMF,
4981 const SimplifyQuery &Q, unsigned MaxRecurse) {
4983 case Instruction::FNeg:
4984 return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
4986 return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
  return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
}

Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
                          const SimplifyQuery &Q) {
  return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
}

/// Given operands for a BinaryOperator, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                            const SimplifyQuery &Q, unsigned MaxRecurse) {
  switch (Opcode) {
  case Instruction::Add:
    return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse);
  case Instruction::Sub:
    return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse);
  case Instruction::Mul:
    return SimplifyMulInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::SDiv:
    return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::UDiv:
    return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::SRem:
    return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::URem:
    return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::Shl:
    return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse);
  case Instruction::LShr:
    return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse);
  case Instruction::AShr:
    return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse);
  case Instruction::And:
    return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::Or:
    return SimplifyOrInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::Xor:
    return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::FAdd:
    return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  case Instruction::FSub:
    return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  case Instruction::FMul:
    return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  case Instruction::FDiv:
    return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  case Instruction::FRem:
    return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

/// Given operands for a BinaryOperator, see if we can fold the result.
/// If not, this returns null.
/// Try to use FastMathFlags when folding the result.
static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                            const FastMathFlags &FMF, const SimplifyQuery &Q,
                            unsigned MaxRecurse) {
  switch (Opcode) {
  case Instruction::FAdd:
    return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
  case Instruction::FSub:
    return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
  case Instruction::FMul:
    return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
  case Instruction::FDiv:
    return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
  default:
    return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
  }
}

Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                           const SimplifyQuery &Q) {
  return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
}

Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                           FastMathFlags FMF, const SimplifyQuery &Q) {
  return ::SimplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
}
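
// Usage sketch (illustrative only; 'I', 'X', 'Y', 'Builder', and 'SQ' are
// hypothetical caller-side names): a transform can ask whether a binary
// operation it is about to create already folds away before materializing it:
//
//   if (Value *V = SimplifyBinOp(Instruction::Add, X, Y,
//                                SQ.getWithInstruction(I)))
//     return V;                       // Reuse the existing/simplified value.
//   return Builder.CreateAdd(X, Y);   // Otherwise create the instruction.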
/// Given operands for a CmpInst, see if we can fold the result.
static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
    return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
  return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
}

Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                             const SimplifyQuery &Q) {
  return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
}

static bool IsIdempotent(Intrinsic::ID ID) {
  switch (ID) {
  default: return false;

  // Unary idempotent: f(f(x)) = f(x)
  case Intrinsic::fabs:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::canonicalize:
    return true;
  }
}

static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset,
                                   const DataLayout &DL) {
  GlobalValue *PtrSym;
  APInt PtrOffset;
  if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
    return nullptr;

  Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext());
  Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
  Type *Int32PtrTy = Int32Ty->getPointerTo();
  Type *Int64Ty = Type::getInt64Ty(Ptr->getContext());

  auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
  if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64)
    return nullptr;

  uint64_t OffsetInt = OffsetConstInt->getSExtValue();
  if (OffsetInt % 4 != 0)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy),
      ConstantInt::get(Int64Ty, OffsetInt / 4));
  Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL);
  if (!Loaded)
    return nullptr;

  auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
  if (!LoadedCE)
    return nullptr;

  if (LoadedCE->getOpcode() == Instruction::Trunc) {
    LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
    if (!LoadedCE)
      return nullptr;
  }

  if (LoadedCE->getOpcode() != Instruction::Sub)
    return nullptr;

  auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
  if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
    return nullptr;
  auto *LoadedLHSPtr = LoadedLHS->getOperand(0);

  Constant *LoadedRHS = LoadedCE->getOperand(1);
  GlobalValue *LoadedRHSSym;
  APInt LoadedRHSOffset;
  if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
                                  DL) ||
      PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
    return nullptr;

  return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy);
}
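
// Illustrative shape of what SimplifyRelativeLoad folds (all names here are
// hypothetical). Given a relative-lookup table whose i32 slot stores the
// distance from the table itself to a target symbol, e.g.
//
//   @tbl = constant [1 x i32]
//       [i32 trunc (i64 sub (i64 ptrtoint (void ()* @fn to i64),
//                            i64 ptrtoint ([1 x i32]* @tbl to i64)) to i32)]
//
// a call such as llvm.load.relative(i8* bitcast (@tbl to i8*), i32 0) can be
// simplified to @fn (as an i8*), because the stored offset exactly cancels
// the table address that the intrinsic adds back at run time.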
static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
                                     const SimplifyQuery &Q) {
  // Idempotent functions return the same result when called repeatedly.
  Intrinsic::ID IID = F->getIntrinsicID();
  if (IsIdempotent(IID))
    if (auto *II = dyn_cast<IntrinsicInst>(Op0))
      if (II->getIntrinsicID() == IID)
        return II;

  Value *X;
  switch (IID) {
  case Intrinsic::fabs:
    if (SignBitMustBeZero(Op0, Q.TLI)) return Op0;
    break;
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (match(Op0, m_BSwap(m_Value(X)))) return X;
    break;
  case Intrinsic::bitreverse:
    // bitreverse(bitreverse(x)) -> x
    if (match(Op0, m_BitReverse(m_Value(X)))) return X;
    break;
  case Intrinsic::exp:
    // exp(log(x)) -> x
    if (Q.CxtI->hasAllowReassoc() &&
        match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X)))) return X;
    break;
  case Intrinsic::exp2:
    // exp2(log2(x)) -> x
    if (Q.CxtI->hasAllowReassoc() &&
        match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X)))) return X;
    break;
  case Intrinsic::log:
    // log(exp(x)) -> x
    if (Q.CxtI->hasAllowReassoc() &&
        match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X)))) return X;
    break;
  case Intrinsic::log2:
    // log2(exp2(x)) -> x and log2(pow(2.0, x)) -> x
    if (Q.CxtI->hasAllowReassoc() &&
        (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
         match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0),
                                                m_Value(X))))) return X;
    break;
  case Intrinsic::log10:
    // log10(pow(10.0, x)) -> x
    if (Q.CxtI->hasAllowReassoc() &&
        match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0),
                                               m_Value(X)))) return X;
    break;
  case Intrinsic::floor:
  case Intrinsic::trunc:
  case Intrinsic::ceil:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::nearbyint:
  case Intrinsic::rint: {
    // floor (sitofp x) -> sitofp x
    // floor (uitofp x) -> uitofp x
    //
    // Converting from int always results in a finite integral number or
    // infinity. For either of those inputs, these rounding functions always
    // return the same value, so the rounding can be eliminated.
    if (match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value())))
      return Op0;
    break;
  }
  default:
    break;
  }

  return nullptr;
}

static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
                                      const SimplifyQuery &Q) {
  Intrinsic::ID IID = F->getIntrinsicID();
  Type *ReturnType = F->getReturnType();
  switch (IID) {
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // X - X -> { 0, false }
    if (Op0 == Op1)
      return Constant::getNullValue(ReturnType);
    LLVM_FALLTHROUGH;
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
    // X - undef -> { undef, false }
    // undef - X -> { undef, false }
    // X + undef -> { undef, false }
    // undef + x -> { undef, false }
    if (isa<UndefValue>(Op0) || isa<UndefValue>(Op1)) {
      return ConstantStruct::get(
          cast<StructType>(ReturnType),
          {UndefValue::get(ReturnType->getStructElementType(0)),
           Constant::getNullValue(ReturnType->getStructElementType(1))});
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // 0 * X -> { 0, false }
    // X * 0 -> { 0, false }
    if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
      return Constant::getNullValue(ReturnType);
    // undef * X -> { 0, false }
    // X * undef -> { 0, false }
    if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
      return Constant::getNullValue(ReturnType);
    break;
  case Intrinsic::uadd_sat:
    // sat(MAX + X) -> MAX
    // sat(X + MAX) -> MAX
    if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
      return Constant::getAllOnesValue(ReturnType);
    LLVM_FALLTHROUGH;
  case Intrinsic::sadd_sat:
    // sat(X + undef) -> -1
    // sat(undef + X) -> -1
    // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
    // For signed: Assume undef is ~X, in which case X + ~X = -1.
    if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
      return Constant::getAllOnesValue(ReturnType);

    // X + 0 -> X
    if (match(Op1, m_Zero()))
      return Op0;
    // 0 + X -> X
    if (match(Op0, m_Zero()))
      return Op1;
    break;
  case Intrinsic::usub_sat:
    // sat(0 - X) -> 0, sat(X - MAX) -> 0
    if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
      return Constant::getNullValue(ReturnType);
    LLVM_FALLTHROUGH;
  case Intrinsic::ssub_sat:
    // X - X -> 0, X - undef -> 0, undef - X -> 0
    if (Op0 == Op1 || match(Op0, m_Undef()) || match(Op1, m_Undef()))
      return Constant::getNullValue(ReturnType);
    // X - 0 -> X
    if (match(Op1, m_Zero()))
      return Op0;
    break;
  case Intrinsic::load_relative:
    if (auto *C0 = dyn_cast<Constant>(Op0))
      if (auto *C1 = dyn_cast<Constant>(Op1))
        return SimplifyRelativeLoad(C0, C1, Q.DL);
    break;
  case Intrinsic::powi:
    if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ConstantFP::get(Op0->getType(), 1.0);
      // powi(x, 1) -> x
      if (Power->isOne())
        return Op0;
    }
    break;
  case Intrinsic::copysign:
    // copysign X, X --> X
    if (Op0 == Op1)
      return Op0;
    // copysign -X, X --> X
    // copysign X, -X --> -X
    if (match(Op0, m_FNeg(m_Specific(Op1))) ||
        match(Op1, m_FNeg(m_Specific(Op0))))
      return Op1;
    break;
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::maximum:
  case Intrinsic::minimum: {
    // If the arguments are the same, this is a no-op.
    if (Op0 == Op1) return Op0;

    // If one argument is undef, return the other argument.
    if (match(Op0, m_Undef()))
      return Op1;
    if (match(Op1, m_Undef()))
      return Op0;

    // If one argument is NaN, return other or NaN appropriately.
    bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
    if (match(Op0, m_NaN()))
      return PropagateNaN ? Op0 : Op1;
    if (match(Op1, m_NaN()))
      return PropagateNaN ? Op1 : Op0;

    // Min/max of the same operation with common operand:
    // m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
    if (auto *M0 = dyn_cast<IntrinsicInst>(Op0))
      if (M0->getIntrinsicID() == IID &&
          (M0->getOperand(0) == Op1 || M0->getOperand(1) == Op1))
        return Op0;
    if (auto *M1 = dyn_cast<IntrinsicInst>(Op1))
      if (M1->getIntrinsicID() == IID &&
          (M1->getOperand(0) == Op0 || M1->getOperand(1) == Op0))
        return Op1;

    // min(X, -Inf) --> -Inf (and commuted variant)
    // max(X, +Inf) --> +Inf (and commuted variant)
    bool UseNegInf = IID == Intrinsic::minnum || IID == Intrinsic::minimum;
    const APFloat *C;
    if ((match(Op0, m_APFloat(C)) && C->isInfinity() &&
         C->isNegative() == UseNegInf) ||
        (match(Op1, m_APFloat(C)) && C->isInfinity() &&
         C->isNegative() == UseNegInf))
      return ConstantFP::getInfinity(ReturnType, UseNegInf);

    // TODO: minnum(nnan x, inf) -> x
    // TODO: minnum(nnan ninf x, flt_max) -> x
    // TODO: maxnum(nnan x, -inf) -> x
    // TODO: maxnum(nnan ninf x, -flt_max) -> x
    break;
  }
  default:
    break;
  }

  return nullptr;
}

static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {

  // Intrinsics with no operands have some kind of side effect. Don't simplify.
  unsigned NumOperands = Call->getNumArgOperands();
  if (!NumOperands)
    return nullptr;

  Function *F = cast<Function>(Call->getCalledFunction());
  Intrinsic::ID IID = F->getIntrinsicID();
  if (NumOperands == 1)
    return simplifyUnaryIntrinsic(F, Call->getArgOperand(0), Q);

  if (NumOperands == 2)
    return simplifyBinaryIntrinsic(F, Call->getArgOperand(0),
                                   Call->getArgOperand(1), Q);

  // Handle intrinsics with 3 or more arguments.
  switch (IID) {
  case Intrinsic::masked_load:
  case Intrinsic::masked_gather: {
    Value *MaskArg = Call->getArgOperand(2);
    Value *PassthruArg = Call->getArgOperand(3);
    // If the mask is all zeros or undef, the "passthru" argument is the result.
    if (maskIsAllZeroOrUndef(MaskArg))
      return PassthruArg;
    return nullptr;
  }
  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    Value *Op0 = Call->getArgOperand(0), *Op1 = Call->getArgOperand(1),
          *ShAmtArg = Call->getArgOperand(2);

    // If both operands are undef, the result is undef.
    if (match(Op0, m_Undef()) && match(Op1, m_Undef()))
      return UndefValue::get(F->getReturnType());

    // If shift amount is undef, assume it is zero.
    if (match(ShAmtArg, m_Undef()))
      return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1);

    const APInt *ShAmtC;
    if (match(ShAmtArg, m_APInt(ShAmtC))) {
      // If there's effectively no shift, return the 1st arg or 2nd arg.
      APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
      if (ShAmtC->urem(BitWidth).isNullValue())
        return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1);
    }
    return nullptr;
  }
  case Intrinsic::fma:
  case Intrinsic::fmuladd: {
    Value *Op0 = Call->getArgOperand(0);
    Value *Op1 = Call->getArgOperand(1);
    Value *Op2 = Call->getArgOperand(2);
    if (Value *V = simplifyFPOp({ Op0, Op1, Op2 }))
      return V;
    return nullptr;
  }
  default:
    return nullptr;
  }
}

Value *llvm::SimplifyCall(CallBase *Call, const SimplifyQuery &Q) {
  Value *Callee = Call->getCalledOperand();

  // musttail calls can only be simplified if they are also DCEd.
  // As we can't guarantee this here, don't simplify them.
  if (Call->isMustTailCall())
    return nullptr;

  // call undef -> undef
  // call null -> undef
  if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
    return UndefValue::get(Call->getType());

  Function *F = dyn_cast<Function>(Callee);
  if (!F)
    return nullptr;

  if (F->isIntrinsic())
    if (Value *Ret = simplifyIntrinsic(Call, Q))
      return Ret;

  if (!canConstantFoldCallTo(Call, F))
    return nullptr;

  SmallVector<Constant *, 4> ConstantArgs;
  unsigned NumArgs = Call->getNumArgOperands();
  ConstantArgs.reserve(NumArgs);
  for (auto &Arg : Call->args()) {
    Constant *C = dyn_cast<Constant>(&Arg);
    if (!C) {
      if (isa<MetadataAsValue>(Arg.get()))
        continue;
      return nullptr;
    }
    ConstantArgs.push_back(C);
  }

  return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
}
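
// Usage sketch (illustrative; 'CB' and 'SQ' are hypothetical caller-side
// names): a caller would typically forward the call's uses to the simplified
// value and then delete the call only if it is now trivially dead:
//
//   if (Value *V = SimplifyCall(&CB, SQ)) {
//     CB.replaceAllUsesWith(V);
//     if (isInstructionTriviallyDead(&CB))
//       CB.eraseFromParent();
//   }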
/// Given operands for a Freeze, see if we can fold the result.
static Value *SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
  // Use a utility function defined in ValueTracking.
  if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.CxtI, Q.DT))
    return Op0;
  // We have room for improvement.
  return nullptr;
}

Value *llvm::SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
  return ::SimplifyFreezeInst(Op0, Q);
}

/// See if we can compute a simplified version of this instruction.
/// If not, this returns null.

Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
                                 OptimizationRemarkEmitter *ORE) {
  const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
  Value *Result;

  switch (I->getOpcode()) {
  default:
    Result = ConstantFoldInstruction(I, Q.DL, Q.TLI);
    break;
  case Instruction::FNeg:
    Result = SimplifyFNegInst(I->getOperand(0), I->getFastMathFlags(), Q);
    break;
  case Instruction::FAdd:
    Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
                              I->getFastMathFlags(), Q);
    break;
  case Instruction::Add:
    Result =
        SimplifyAddInst(I->getOperand(0), I->getOperand(1),
                        Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
                        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::FSub:
    Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
                              I->getFastMathFlags(), Q);
    break;
  case Instruction::Sub:
    Result =
        SimplifySubInst(I->getOperand(0), I->getOperand(1),
                        Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
                        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::FMul:
    Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
                              I->getFastMathFlags(), Q);
    break;
  case Instruction::Mul:
    Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::SDiv:
    Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::UDiv:
    Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::FDiv:
    Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1),
                              I->getFastMathFlags(), Q);
    break;
  case Instruction::SRem:
    Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::URem:
    Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::FRem:
    Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1),
                              I->getFastMathFlags(), Q);
    break;
  case Instruction::Shl:
    Result =
        SimplifyShlInst(I->getOperand(0), I->getOperand(1),
                        Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
                        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::LShr:
    Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
                              Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::AShr:
    Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
                              Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::And:
    Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::Or:
    Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::Xor:
    Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::ICmp:
    Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
                              I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::FCmp:
    Result =
        SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), I->getOperand(0),
                         I->getOperand(1), I->getFastMathFlags(), Q);
    break;
  case Instruction::Select:
    Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
                                I->getOperand(2), Q);
    break;
  case Instruction::GetElementPtr: {
    SmallVector<Value *, 8> Ops(I->op_begin(), I->op_end());
    Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
                             Ops, Q);
    break;
  }
  case Instruction::InsertValue: {
    InsertValueInst *IV = cast<InsertValueInst>(I);
    Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
                                     IV->getInsertedValueOperand(),
                                     IV->getIndices(), Q);
    break;
  }
  case Instruction::InsertElement: {
    auto *IE = cast<InsertElementInst>(I);
    Result = SimplifyInsertElementInst(IE->getOperand(0), IE->getOperand(1),
                                       IE->getOperand(2), Q);
    break;
  }
  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(I);
    Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
                                      EVI->getIndices(), Q);
    break;
  }
  case Instruction::ExtractElement: {
    auto *EEI = cast<ExtractElementInst>(I);
    Result = SimplifyExtractElementInst(EEI->getVectorOperand(),
                                        EEI->getIndexOperand(), Q);
    break;
  }
  case Instruction::ShuffleVector: {
    auto *SVI = cast<ShuffleVectorInst>(I);
    Result =
        SimplifyShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
                                  SVI->getShuffleMask(), SVI->getType(), Q);
    break;
  }
  case Instruction::PHI:
    Result = SimplifyPHINode(cast<PHINode>(I), Q);
    break;
  case Instruction::Call: {
    Result = SimplifyCall(cast<CallInst>(I), Q);
    break;
  }
  case Instruction::Freeze:
    Result = SimplifyFreezeInst(I->getOperand(0), Q);
    break;
#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
#include "llvm/IR/Instruction.def"
#undef HANDLE_CAST_INST
    Result =
        SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(), Q);
    break;
  case Instruction::Alloca:
    // No simplifications for Alloca and it can't be constant folded.
    Result = nullptr;
    break;
  }

  /// If called on unreachable code, the above logic may report that the
  /// instruction simplified to itself. Make life easier for users by
  /// detecting that case here, returning a safe value instead.
  return Result == I ? UndefValue::get(I->getType()) : Result;
}
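
// Usage sketch (illustrative; assumes a caller-provided Function 'F', a
// SimplifyQuery 'SQ', and llvm::instructions() from llvm/IR/InstIterator.h):
// scan a function and forward each instruction's uses to its simplified form,
// leaving the now-dead originals for a later cleanup pass:
//
//   for (Instruction &I : instructions(F))
//     if (Value *V = SimplifyInstruction(&I, SQ))
//       I.replaceAllUsesWith(V);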
/// Implementation of recursive simplification through an instruction's
/// uses.
///
/// This is the common implementation of the recursive simplification routines.
/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
/// instructions to process and attempt to simplify it using
/// InstructionSimplify. Recursively visited users which could not be
/// simplified themselves are added to the optional UnsimplifiedUsers set for
/// further processing by the caller.
///
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(
    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
    const DominatorTree *DT, AssumptionCache *AC,
    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
  bool Simplified = false;
  SmallSetVector<Instruction *, 8> Worklist;
  const DataLayout &DL = I->getModule()->getDataLayout();

  // If we have an explicit value to collapse to, do that round of the
  // simplification loop by hand initially.
  if (SimpleV) {
    for (User *U : I->users())
      if (U != I)
        Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // Gracefully handle edge cases where the instruction is not wired into any
    // parent block.
    if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
        !I->mayHaveSideEffects())
      I->eraseFromParent();
  } else {
    Worklist.insert(I);
  }

  // Note that we must test the size on each iteration, the worklist can grow.
  for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
    I = Worklist[Idx];

    // See if this instruction simplifies.
    SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
    if (!SimpleV) {
      if (UnsimplifiedUsers)
        UnsimplifiedUsers->insert(I);
      continue;
    }

    Simplified = true;

    // Stash away all the uses of the old instruction so we can check them for
    // recursive simplifications after a RAUW. This is cheaper than checking
    // all uses of To on the recursive step in most cases.
    for (User *U : I->users())
      Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // Gracefully handle edge cases where the instruction is not wired into any
    // parent block.
    if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
        !I->mayHaveSideEffects())
      I->eraseFromParent();
  }
  return Simplified;
}

bool llvm::recursivelySimplifyInstruction(Instruction *I,
                                          const TargetLibraryInfo *TLI,
                                          const DominatorTree *DT,
                                          AssumptionCache *AC) {
  return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC, nullptr);
}

bool llvm::replaceAndRecursivelySimplify(
    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
    const DominatorTree *DT, AssumptionCache *AC,
    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
  assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
  assert(SimpleV && "Must provide a simplified value.");
  return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
                                           UnsimplifiedUsers);
}
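
// Usage sketch (illustrative; 'I', 'Repl', 'TLI', 'DT', and 'AC' are
// hypothetical caller-side names): once a caller has proven that 'I' computes
// the same value as 'Repl', this both performs the replacement and simplifies
// any users that become foldable as a consequence:
//
//   replaceAndRecursivelySimplify(I, Repl, &TLI, &DT, &AC);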

namespace llvm {
const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
  auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
  auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
  auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
  return {F.getParent()->getDataLayout(), TLI, DT, AC};
}

const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
                                         const DataLayout &DL) {
  return {DL, &AR.TLI, &AR.DT, &AR.AC};
}

template <class T, class... TArgs>
const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
                                         Function &F) {
  auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
  auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
  auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
  return {F.getParent()->getDataLayout(), TLI, DT, AC};
}
template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
                                                  Function &);
} // namespace llvm