//===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for add, fadd, sub, and fsub.
//
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

namespace {
/// Class representing the coefficient of a floating-point addend.
/// This class needs to be highly efficient; this is especially true for
/// the constructor. As of this writing, the cost of the default
/// constructor is merely a 4-byte zero store (assuming the compiler is
/// able to perform write-merging).
///
class FAddendCoef {
public:
  // The constructor has to initialize an APFloat, which is unnecessary for
  // most addends, whose coefficient is either 1 or -1. So, the constructor
  // is expensive. In order to avoid the cost of the constructor, we should
  // reuse some instances whenever possible. The pre-created instances
  // FAddCombine::Add[0-5] embody this idea.
  FAddendCoef() = default;
  ~FAddendCoef();
  // If possible, don't define operator+/operator- etc., because these
  // operators inevitably call FAddendCoef's constructor, which is not cheap.
  void operator=(const FAddendCoef &A);
  void operator+=(const FAddendCoef &A);
  void operator*=(const FAddendCoef &S);

  void set(short C) {
    assert(!insaneIntVal(C) && "Insane coefficient");
    IsFp = false; IntVal = C;
  }

  void set(const APFloat& C);

  void negate();
  bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
  Value *getValue(Type *) const;

  bool isOne() const { return isInt() && IntVal == 1; }
  bool isTwo() const { return isInt() && IntVal == 2; }
  bool isMinusOne() const { return isInt() && IntVal == -1; }
  bool isMinusTwo() const { return isInt() && IntVal == -2; }
private:
  bool insaneIntVal(int V) { return V > 4 || V < -4; }

  APFloat *getFpValPtr()
    { return reinterpret_cast<APFloat *>(&FpValBuf.buffer[0]); }

  const APFloat *getFpValPtr() const
    { return reinterpret_cast<const APFloat *>(&FpValBuf.buffer[0]); }

  const APFloat &getFpVal() const {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }

  APFloat &getFpVal() {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }

  bool isInt() const { return !IsFp; }
  // If the coefficient is represented by an integer, promote it to a
  // floating point.
  void convertToFpType(const fltSemantics &Sem);

  // Construct an APFloat from a signed integer.
  // TODO: We should get rid of this function when APFloat can be constructed
  //       from a *SIGNED* integer.
  APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);

  bool IsFp = false;
  // True iff FpValBuf contains an instance of APFloat.
  bool BufHasFpVal = false;

  // The integer coefficient of an individual addend is either 1 or -1,
  // and we try to simplify at most 4 addends from at most two neighboring
  // instructions. So the range of <IntVal> falls in [-4, 4]. APInt is
  // overkill for this purpose.
  short IntVal = 0;

  AlignedCharArrayUnion<APFloat> FpValBuf;
};
/// FAddend is used to represent a floating-point addend. An addend is
/// represented as <C, V>, where V is a symbolic value, and C is a
/// constant coefficient. A constant addend is represented as <C, 0>.
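///
/// For example, the expression "4.0 * X" forms the addend <4.0, X>, and the
/// constant 3.0 by itself forms the addend <3.0, nullptr>.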
class FAddend {
public:
  FAddend() = default;

  void operator+=(const FAddend &T) {
    assert((Val == T.Val) && "Symbolic-values disagree");
    Coeff += T.Coeff;
  }
  Value *getSymVal() const { return Val; }
  const FAddendCoef &getCoef() const { return Coeff; }

  bool isConstant() const { return Val == nullptr; }
  bool isZero() const { return Coeff.isZero(); }
  void set(short Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }

  void set(const APFloat &Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }

  void set(const ConstantFP *Coefficient, Value *V) {
    Coeff.set(Coefficient->getValueAPF());
    Val = V;
  }

  void negate() { Coeff.negate(); }
  /// Drill down the U-D chain one step to find the definition of V, and
  /// try to break the definition into one or two addends.
  static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);

  /// Similar to FAddend::drillValueDownOneStep() except that the value
  /// being split is the addend itself.
  unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;

private:
  void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }

  // This addend has the value of "Coeff * Val".
  Value *Val = nullptr;
  FAddendCoef Coeff;
};
/// FAddCombine is the class for optimizing an unsafe fadd/fsub along
/// with at most two of its neighboring instructions.
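///
/// For example, with 'reassoc nsz' fast-math flags, "(X + 1.5) + 2.5" can
/// be simplified to "X + 4.0", saving one instruction.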
class FAddCombine {
public:
  FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {}

  Value *simplify(Instruction *FAdd);

private:
  using AddendVect = SmallVector<const FAddend *, 4>;

  Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);

  /// Convert the given addend to a Value.
  Value *createAddendVal(const FAddend &A, bool& NeedNeg);

  /// Return the number of instructions needed to emit the N-ary addition.
  unsigned calcInstrNumber(const AddendVect& Vect);

  Value *createFSub(Value *Opnd0, Value *Opnd1);
  Value *createFAdd(Value *Opnd0, Value *Opnd1);
  Value *createFMul(Value *Opnd0, Value *Opnd1);
  Value *createFNeg(Value *V);
  Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
  void createInstPostProc(Instruction *NewInst, bool NoNumber = false);
  // Debugging state is clustered here.
#ifndef NDEBUG
  unsigned CreateInstrNum;
  void initCreateInstNum() { CreateInstrNum = 0; }
  void incCreateInstNum() { CreateInstrNum++; }
#else
  void initCreateInstNum() {}
  void incCreateInstNum() {}
#endif

  InstCombiner::BuilderTy &Builder;
  Instruction *Instr = nullptr;
};

} // end anonymous namespace
//===----------------------------------------------------------------------===//
//
// Implementation of
//    {FAddendCoef, FAddend, FAddCombine}.
//
//===----------------------------------------------------------------------===//
FAddendCoef::~FAddendCoef() {
  if (BufHasFpVal)
    getFpValPtr()->~APFloat();
}
void FAddendCoef::set(const APFloat& C) {
  APFloat *P = getFpValPtr();

  if (isInt() || !BufHasFpVal)
    // As the buffer is a meaningless byte stream, we cannot call
    // APFloat::operator=().
    new(P) APFloat(C);
  else
    *P = C;

  IsFp = BufHasFpVal = true;
}
void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
  if (!isInt())
    return;

  APFloat *P = getFpValPtr();
  if (IntVal > 0)
    new(P) APFloat(Sem, IntVal);
  else {
    new(P) APFloat(Sem, 0 - IntVal);
    P->changeSign();
  }
  IsFp = BufHasFpVal = true;
}
APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
  if (Val >= 0)
    return APFloat(Sem, Val);

  APFloat T(Sem, 0 - Val);
  T.changeSign();

  return T;
}
void FAddendCoef::operator=(const FAddendCoef &That) {
  if (That.isInt())
    IntVal = That.IntVal;
  else
    set(That.getFpVal());
}
void FAddendCoef::operator+=(const FAddendCoef &That) {
  enum APFloat::roundingMode RndMode = APFloat::rmNearestTiesToEven;
  if (isInt() == That.isInt()) {
    if (isInt())
      IntVal += That.IntVal;
    else
      getFpVal().add(That.getFpVal(), RndMode);
    return;
  }

  if (isInt()) {
    const APFloat &T = That.getFpVal();
    convertToFpType(T.getSemantics());
    getFpVal().add(T, RndMode);
    return;
  }

  APFloat &T = getFpVal();
  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
}
void FAddendCoef::operator*=(const FAddendCoef &That) {
  if (That.isOne())
    return;

  if (That.isMinusOne()) {
    negate();
    return;
  }

  if (isInt() && That.isInt()) {
    int Res = IntVal * (int)That.IntVal;
    assert(!insaneIntVal(Res) && "Insane int value");
    IntVal = Res;
    return;
  }

  const fltSemantics &Semantic =
    isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();

  if (isInt())
    convertToFpType(Semantic);
  APFloat &F0 = getFpVal();

  if (That.isInt())
    F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
                APFloat::rmNearestTiesToEven);
  else
    F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
}
void FAddendCoef::negate() {
  if (isInt())
    IntVal = 0 - IntVal;
  else
    getFpVal().changeSign();
}
Value *FAddendCoef::getValue(Type *Ty) const {
  return isInt() ?
    ConstantFP::get(Ty, float(IntVal)) :
    ConstantFP::get(Ty->getContext(), getFpVal());
}
// The definition of <Val>     Addends
// =========================================
//  A +  B                     <1, A>, <1, B>
//  A -  B                     <1, A>, <-1, B>
//  0 -  B                     <-1, B>
//  C *  A                     <C, A>
//  A +  C                     <1, A>, <C, NULL>
//  0 +/- 0                    <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is constant
unsigned FAddend::drillValueDownOneStep
  (Value *Val, FAddend &Addend0, FAddend &Addend1) {
  Instruction *I = nullptr;
  if (!Val || !(I = dyn_cast<Instruction>(Val)))
    return 0;

  unsigned Opcode = I->getOpcode();

  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
    ConstantFP *C0, *C1;
    Value *Opnd0 = I->getOperand(0);
    Value *Opnd1 = I->getOperand(1);
    if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
      Opnd0 = nullptr;

    if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
      Opnd1 = nullptr;

    if (Opnd0) {
      if (!C0)
        Addend0.set(1, Opnd0);
      else
        Addend0.set(C0, nullptr);
    }

    if (Opnd1) {
      FAddend &Addend = Opnd0 ? Addend1 : Addend0;
      if (!C1)
        Addend.set(1, Opnd1);
      else
        Addend.set(C1, nullptr);
      if (Opcode == Instruction::FSub)
        Addend.negate();
    }

    if (Opnd0 || Opnd1)
      return Opnd0 && Opnd1 ? 2 : 1;

    // Both operands are zero. Weird!
    Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
    return 1;
  }

  if (I->getOpcode() == Instruction::FMul) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
      Addend0.set(C, V1);
      return 1;
    }

    if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
      Addend0.set(C, V0);
      return 1;
    }
  }

  return 0;
}
// Try to break *this* addend into two addends. For example, suppose this
// addend is <2.3, V> and V = X + Y; by calling this function we obtain two
// addends, namely <2.3, X> and <2.3, Y>.
unsigned FAddend::drillAddendDownOneStep
  (FAddend &Addend0, FAddend &Addend1) const {
  if (isConstant())
    return 0;

  unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
  if (!BreakNum || Coeff.isOne())
    return BreakNum;

  Addend0.Scale(Coeff);

  if (BreakNum == 2)
    Addend1.Scale(Coeff);

  return BreakNum;
}
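// Try to simplify "I", an fadd/fsub with 'reassoc' and 'nsz', by breaking
// both of its operands into (at most two) addends apiece and re-combining
// the resulting addend set.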
Value *FAddCombine::simplify(Instruction *I) {
  assert(I->hasAllowReassoc() && I->hasNoSignedZeros() &&
         "Expected 'reassoc'+'nsz' instruction");

  // Currently we are not able to handle vector types.
  if (I->getType()->isVectorTy())
    return nullptr;

  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  // Save the instruction before calling other member functions.
  Instr = I;

  FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;

  unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);
  // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
  unsigned Opnd0_ExpNum = 0;
  unsigned Opnd1_ExpNum = 0;

  if (!Opnd0.isConstant())
    Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);

  // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
  if (OpndNum == 2 && !Opnd1.isConstant())
    Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);
  // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
  if (Opnd0_ExpNum && Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0_0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    // Compute the instruction quota. We should save at least one instruction.
    unsigned InstQuota = 0;

    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
                 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;

    if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
      return R;
  }
  if (OpndNum != 2) {
    // The input instruction is "I = 0.0 +/- V". If "V" could have been
    // split into two addends, say "V = X - Y", the instruction would have
    // been optimized into "I = Y - X" in the previous steps.
    const FAddendCoef &CE = Opnd0.getCoef();
    return CE.isOne() ? Opnd0.getSymVal() : nullptr;
  }
  // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
  if (Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }
  // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
  if (Opnd0_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd1);
    AllOpnds.push_back(&Opnd0_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  return nullptr;
}
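// Fold the given addend set down to a minimal set by merging addends that
// share a symbolic value. For example, the set {<2, x>, <-1, x>} (from
// "2*x - x") folds to {<1, x>}, so the whole expression simplifies to "x"
// without emitting any new instruction.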
Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
  unsigned AddendNum = Addends.size();
  assert(AddendNum <= 4 && "Too many addends");

  // For saving intermediate results.
  unsigned NextTmpIdx = 0;
  FAddend TmpResult[3];

  // Points to the constant addend of the resulting simplified expression.
  // If the resulting expression has a constant addend, it is desirable for
  // that constant to reside at the top of the resulting expression tree;
  // placing constants close to the super-expr(s) will potentially reveal
  // some optimization opportunities in the super-expr(s).
  const FAddend *ConstAdd = nullptr;
  // Simplified addends are placed in <SimpVect>.
  AddendVect SimpVect;

  // The outer loop works on one symbolic value at a time. Suppose the input
  // addends are: <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
  // The symbolic values will be processed in this order: x, y, z.
  for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {
    const FAddend *ThisAddend = Addends[SymIdx];
    if (!ThisAddend) {
      // This addend was processed before.
      continue;
    }

    Value *Val = ThisAddend->getSymVal();
    unsigned StartIdx = SimpVect.size();
    SimpVect.push_back(ThisAddend);
    // The inner loop collects addends sharing the same symbolic value, and
    // these addends will later be folded into a single addend. Following the
    // above example, if the symbolic value "y" is being processed, the inner
    // loop will collect the two addends "<b1, y>" and "<b2, y>", which will
    // later be folded into "<b1+b2, y>".
    for (unsigned SameSymIdx = SymIdx + 1;
         SameSymIdx < AddendNum; SameSymIdx++) {
      const FAddend *T = Addends[SameSymIdx];
      if (T && T->getSymVal() == Val) {
        // Set to null so that the next iteration of the outer loop will not
        // process this addend again.
        Addends[SameSymIdx] = nullptr;
        SimpVect.push_back(T);
      }
    }
    // If multiple addends share the same symbolic value, fold them together.
    if (StartIdx + 1 != SimpVect.size()) {
      FAddend &R = TmpResult[NextTmpIdx ++];
      R = *SimpVect[StartIdx];
      for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
        R += *SimpVect[Idx];

      // Pop all addends being folded and push the resulting folded addend.
      SimpVect.resize(StartIdx);
      if (Val != nullptr) {
        if (!R.isZero())
          SimpVect.push_back(&R);
      } else {
        // Don't push the constant addend at this time. It will be the last
        // element of <SimpVect>.
        ConstAdd = &R;
      }
    }
  }

  assert((NextTmpIdx <= array_lengthof(TmpResult) + 1) &&
         "out-of-bound access");

  if (ConstAdd)
    SimpVect.push_back(ConstAdd);
  Value *Result;
  if (!SimpVect.empty())
    Result = createNaryFAdd(SimpVect, InstrQuota);
  else {
    // The addition was folded to 0.0.
    Result = ConstantFP::get(Instr->getType(), 0.0);
  }

  return Result;
}
Value *FAddCombine::createNaryFAdd
  (const AddendVect &Opnds, unsigned InstrQuota) {
  assert(!Opnds.empty() && "Expect at least one addend");

  // Step 1: Check if the # of instructions needed exceeds the quota.

  unsigned InstrNeeded = calcInstrNumber(Opnds);
  if (InstrNeeded > InstrQuota)
    return nullptr;

  initCreateInstNum();

  // Step 2: Emit the N-ary addition.
  // Note that at most three instructions are involved in Fadd-InstCombine: the
  // addition in question, and at most two neighboring instructions.
  // The resulting optimized addition should have at least one fewer instruction
  // than the original addition expression tree. This implies that the resulting
  // N-ary addition has at most two instructions, and we don't need to worry
  // about tree-height when constructing the N-ary addition.
  Value *LastVal = nullptr;
  bool LastValNeedNeg = false;

  // Iterate over the addends, creating fadd/fsub from adjacent pairs.
  for (const FAddend *Opnd : Opnds) {
    bool NeedNeg;
    Value *V = createAddendVal(*Opnd, NeedNeg);
    if (!LastVal) {
      LastVal = V;
      LastValNeedNeg = NeedNeg;
      continue;
    }

    if (LastValNeedNeg == NeedNeg) {
      LastVal = createFAdd(LastVal, V);
      continue;
    }

    if (LastValNeedNeg)
      LastVal = createFSub(V, LastVal);
    else
      LastVal = createFSub(LastVal, V);

    LastValNeedNeg = false;
  }

  if (LastValNeedNeg) {
    LastVal = createFNeg(LastVal);
  }

#ifndef NDEBUG
  assert(CreateInstrNum == InstrNeeded &&
         "Inconsistent instruction numbers");
#endif

  return LastVal;
}
Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFSub(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFNeg(Value *V) {
  Value *Zero = cast<Value>(ConstantFP::getZeroValueForNegation(V->getType()));
  Value *NewV = createFSub(Zero, V);
  if (Instruction *I = dyn_cast<Instruction>(NewV))
    createInstPostProc(I, true); // fneg's don't receive instruction numbers.
  return NewV;
}

Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFAdd(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFMul(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}
void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
  NewInstr->setDebugLoc(Instr->getDebugLoc());

  // Keep track of the number of instructions created.
  if (!NoNumber)
    incCreateInstNum();

  // Propagate fast-math flags.
  NewInstr->setFastMathFlags(Instr->getFastMathFlags());
}
// Return the number of instructions needed to emit the N-ary addition.
// NOTE: Keep this function in sync with createAddendVal().
unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
  unsigned OpndNum = Opnds.size();
  unsigned InstrNeeded = OpndNum - 1;

  // The number of addends in the form of "(-1)*x".
  unsigned NegOpndNum = 0;

  // Adjust the number of instructions needed to emit the N-ary add.
  for (const FAddend *Opnd : Opnds) {
    if (Opnd->isConstant())
      continue;

    // The constant check above is really for a few special constant
    // coefficients.
    if (isa<UndefValue>(Opnd->getSymVal()))
      continue;

    const FAddendCoef &CE = Opnd->getCoef();
    if (CE.isMinusOne() || CE.isMinusTwo())
      NegOpndNum++;

    // Let the addend be "c * x". If "c == +/-1", the value of the addend
    // is immediately available; otherwise, it needs exactly one instruction
    // to evaluate the value.
    if (!CE.isMinusOne() && !CE.isOne())
      InstrNeeded++;
  }
  if (NegOpndNum == OpndNum)
    InstrNeeded++;
  return InstrNeeded;
}
// Input Addend        Value           NeedNeg(output)
// ================================================================
// Constant C          C               false
// <+/-1, V>           V               coefficient is -1
// <2/-2, V>           "fadd V, V"     coefficient is -2
// <C, V>              "fmul V, C"     false
//
// NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
  const FAddendCoef &Coeff = Opnd.getCoef();

  if (Opnd.isConstant()) {
    NeedNeg = false;
    return Coeff.getValue(Instr->getType());
  }

  Value *OpndVal = Opnd.getSymVal();

  if (Coeff.isMinusOne() || Coeff.isOne()) {
    NeedNeg = Coeff.isMinusOne();
    return OpndVal;
  }

  if (Coeff.isTwo() || Coeff.isMinusTwo()) {
    NeedNeg = Coeff.isMinusTwo();
    return createFAdd(OpndVal, OpndVal);
  }

  NeedNeg = false;
  return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}
// Checks if any operand is negative and we can convert add to sub.
// This function checks for the following negative patterns
//   ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C))
//   ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
//   XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
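//
// For example, taking the first pattern with C = 0x0F (so NOT(C) = 0xF0):
//   %y = or i8 %z, 0xF0   ; OR(Z, NOT(C))
//   %x = xor i8 %y, 0x0F  ; == NOT(AND(Z, C))
//   %n = add i8 %x, 1     ; == NEG(AND(Z, C))
// so "add %n, %rhs" can be rewritten as "sub %rhs, (and %z, 0x0F)".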
static Value *checkForNegativeOperand(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  // This function creates 2 instructions to replace ADD; we need at least one
  // of LHS or RHS to have one use to ensure a benefit from the transform.
  if (!LHS->hasOneUse() && !RHS->hasOneUse())
    return nullptr;

  Value *X = nullptr, *Y = nullptr, *Z = nullptr;
  const APInt *C1 = nullptr, *C2 = nullptr;
  // If ONE is on the other side, swap.
  if (match(RHS, m_Add(m_Value(X), m_One())))
    std::swap(LHS, RHS);

  if (match(LHS, m_Add(m_Value(X), m_One()))) {
    // If XOR is on the other side, swap.
    if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
      std::swap(X, RHS);

    if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
      // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
      // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
      if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
        Value *NewAnd = Builder.CreateAnd(Z, *C1);
        return Builder.CreateSub(RHS, NewAnd, "sub");
      } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
        // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
        // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
        Value *NewOr = Builder.CreateOr(Z, ~(*C1));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
    }
  }
  // Restore LHS and RHS.
  LHS = I.getOperand(0);
  RHS = I.getOperand(1);

  // If XOR is on the other side, swap.
  if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
    std::swap(LHS, RHS);

  // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
  // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
  if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
    if (C1->countTrailingZeros() == 0)
      if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
        Value *NewOr = Builder.CreateOr(Z, ~(*C2));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }

  return nullptr;
}
Instruction *InstCombiner::foldAddWithConstant(BinaryOperator &Add) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Constant *Op1C;
  if (!match(Op1, m_Constant(Op1C)))
    return nullptr;

  if (Instruction *NV = foldBinOpIntoSelectOrPhi(Add))
    return NV;

  Value *X, *Y;

  // add (sub X, Y), -1 --> add (not Y), X
  if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y)))) &&
      match(Op1, m_AllOnes()))
    return BinaryOperator::CreateAdd(Builder.CreateNot(Y), X);
  // zext(bool) + C -> bool ? C + 1 : C
  if (match(Op0, m_ZExt(m_Value(X))) &&
      X->getType()->getScalarSizeInBits() == 1)
    return SelectInst::Create(X, AddOne(Op1C), Op1);

  // ~X + C --> (C - 1) - X
  if (match(Op0, m_Not(m_Value(X))))
    return BinaryOperator::CreateSub(SubOne(Op1C), X);

  const APInt *C;
  if (!match(Op1, m_APInt(C)))
    return nullptr;
  if (C->isSignMask()) {
    // If wrapping is not allowed, then the addition must set the sign bit:
    // X + (signmask) --> X | signmask
    if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap())
      return BinaryOperator::CreateOr(Op0, Op1);

    // If wrapping is allowed, then the addition flips the sign bit of LHS:
    // X + (signmask) --> X ^ signmask
    return BinaryOperator::CreateXor(Op0, Op1);
  }
  // Is this add the last step in a convoluted sext?
  // add(zext(xor i16 X, -32768), -32768) --> sext X
  Type *Ty = Add.getType();
  const APInt *C2;
  if (match(Op0, m_ZExt(m_Xor(m_Value(X), m_APInt(C2)))) &&
      C2->isMinSignedValue() && C2->sext(Ty->getScalarSizeInBits()) == *C)
    return CastInst::Create(Instruction::SExt, X, Ty);

  // (add (zext (add nuw X, C2)), C) --> (zext (add nuw X, C2 + C))
  if (match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C2))))) &&
      C->isNegative() && C->sge(-C2->sext(C->getBitWidth()))) {
    Constant *NewC =
        ConstantInt::get(X->getType(), *C2 + C->trunc(C2->getBitWidth()));
    return new ZExtInst(Builder.CreateNUWAdd(X, NewC), Ty);
  }
  if (C->isOneValue() && Op0->hasOneUse()) {
    // add (sext i1 X), 1 --> zext (not X)
    // TODO: The smallest IR representation is (select X, 0, 1), and that would
    // not require the one-use check. But we need to remove a transform in
    // visitSelect and make sure that IR value tracking for select is equal or
    // better than for these ops.
    if (match(Op0, m_SExt(m_Value(X))) &&
        X->getType()->getScalarSizeInBits() == 1)
      return new ZExtInst(Builder.CreateNot(X), Ty);

    // Shifts and add used to flip and mask off the low bit:
    // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1
    const APInt *C3;
    if (match(Op0, m_AShr(m_Shl(m_Value(X), m_APInt(C2)), m_APInt(C3))) &&
        C2 == C3 && *C2 == Ty->getScalarSizeInBits() - 1) {
      Value *NotX = Builder.CreateNot(X);
      return BinaryOperator::CreateAnd(NotX, ConstantInt::get(Ty, 1));
    }
  }

  return nullptr;
}
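// Helpers for SimplifyAddWithRemainder below: they match the multiply,
// remainder, and divide components of the "X % C0 + ((X / C0) % C1) * C0"
// pattern.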
// Matches multiplication expression Op * C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns true if such a
// match is found.
static bool MatchMul(Value *E, Value *&Op, APInt &C) {
  const APInt *AI;
  if (match(E, m_Mul(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_Shl(m_Value(Op), m_APInt(AI)))) {
    C = APInt(AI->getBitWidth(), 1);
    C <<= *AI;
    return true;
  }
  return false;
}
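// Note that a left shift also matches as a multiplication: for example,
// "shl i32 %x, 3" is matched as %x * 8, with C built by shifting 1 left by 3.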
// Matches remainder expression Op % C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns the signedness of
// the remainder operation in IsSigned. Returns true if such a match is
// found.
static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned) {
  const APInt *AI;
  IsSigned = false;
  if (match(E, m_SRem(m_Value(Op), m_APInt(AI)))) {
    IsSigned = true;
    C = *AI;
    return true;
  }
  if (match(E, m_URem(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_And(m_Value(Op), m_APInt(AI))) && (*AI + 1).isPowerOf2()) {
    C = *AI + 1;
    return true;
  }
  return false;
}
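// Note that "and i32 %x, 7" is matched above as the unsigned remainder
// "%x % 8", since masking with 2^k - 1 computes the remainder modulo 2^k.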
// Matches division expression Op / C with the given signedness as indicated
// by IsSigned, where C is a constant. Returns the constant value in C and the
// other operand in Op. Returns true if such a match is found.
static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned) {
  const APInt *AI;
  if (IsSigned && match(E, m_SDiv(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (!IsSigned) {
    if (match(E, m_UDiv(m_Value(Op), m_APInt(AI)))) {
      C = *AI;
      return true;
    }
    if (match(E, m_LShr(m_Value(Op), m_APInt(AI)))) {
      C = APInt(AI->getBitWidth(), 1);
      C <<= *AI;
      return true;
    }
  }
  return false;
}
// Returns whether C0 * C1 with the given signedness overflows.
static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned) {
  bool overflow;
  if (IsSigned)
    (void)C0.smul_ov(C1, overflow);
  else
    (void)C0.umul_ov(C1, overflow);
  return overflow;
}
// Simplifies X % C0 + (( X / C0 ) % C1) * C0 to X % (C0 * C1), where (C0 * C1)
// does not overflow.
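//
// For example (unsigned case):
//   %rem  = urem i32 %x, 4
//   %div  = udiv i32 %x, 4
//   %rem2 = urem i32 %div, 8
//   %mul  = mul i32 %rem2, 4
//   %add  = add i32 %rem, %mul
// becomes "urem i32 %x, 32", because x % 4 + ((x / 4) % 8) * 4 == x % 32.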
Value *InstCombiner::SimplifyAddWithRemainder(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *X, *MulOpV;
  APInt C0, MulOpC;
  bool IsSigned;
  // Match I = X % C0 + MulOpV * C0
  if (((MatchRem(LHS, X, C0, IsSigned) && MatchMul(RHS, MulOpV, MulOpC)) ||
       (MatchRem(RHS, X, C0, IsSigned) && MatchMul(LHS, MulOpV, MulOpC))) &&
      C0 == MulOpC) {
    Value *RemOpV;
    APInt C1;
    bool Rem2IsSigned;
    // Match MulOpC = RemOpV % C1
    if (MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) &&
        IsSigned == Rem2IsSigned) {
      Value *DivOpV;
      APInt DivOpC;
      // Match RemOpV = X / C0
      if (MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) && X == DivOpV &&
          C0 == DivOpC && !MulWillOverflow(C0, C1, IsSigned)) {
        Value *NewDivisor =
            ConstantInt::get(X->getType()->getContext(), C0 * C1);
        return IsSigned ? Builder.CreateSRem(X, NewDivisor, "srem")
                        : Builder.CreateURem(X, NewDivisor, "urem");
      }
    }
  }
  return nullptr;
}
/// Fold
///   (1 << NBits) - 1
/// Into:
///   ~(-(1 << NBits))
/// Because a 'not' is better for bit-tracking analysis and other transforms
/// than an 'add'. The new shl is always nsw, and is nuw if the old `add` was.
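///
/// For example:
///   %shl  = shl i32 1, %nbits
///   %mask = add i32 %shl, -1        ; (1 << nbits) - 1
/// becomes:
///   %notmask = shl i32 -1, %nbits
///   %mask    = xor i32 %notmask, -1 ; ~(-1 << nbits)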
static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
                                           InstCombiner::BuilderTy &Builder) {
  Value *NBits;
  if (!match(&I, m_Add(m_OneUse(m_Shl(m_One(), m_Value(NBits))), m_AllOnes())))
    return nullptr;

  Constant *MinusOne = Constant::getAllOnesValue(NBits->getType());
  Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");
  // Be wary of constant folding.
  if (auto *BOp = dyn_cast<BinaryOperator>(NotMask)) {
    // Always NSW. But NUW propagates from `add`.
    BOp->setHasNoSignedWrap();
    BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
  }

  return BinaryOperator::CreateNot(NotMask, I.getName());
}
Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
  if (Value *V = SimplifyAddInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // (A*B)+(A*C) -> A*(B+C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);
  if (Instruction *X = foldAddWithConstant(I))
    return X;

  // FIXME: This should be moved into the above helper function to allow these
  // transforms for general constant or constant splat vectors.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Type *Ty = I.getType();
  if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
    Value *XorLHS = nullptr; ConstantInt *XorRHS = nullptr;
    if (match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
      unsigned TySizeBits = Ty->getScalarSizeInBits();
      const APInt &RHSVal = CI->getValue();
      unsigned ExtendAmt = 0;
      // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
      // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
      if (XorRHS->getValue() == -RHSVal) {
        if (RHSVal.isPowerOf2())
          ExtendAmt = TySizeBits - RHSVal.logBase2() - 1;
        else if (XorRHS->getValue().isPowerOf2())
          ExtendAmt = TySizeBits - XorRHS->getValue().logBase2() - 1;
      }

      if (ExtendAmt) {
        APInt Mask = APInt::getHighBitsSet(TySizeBits, ExtendAmt);
        if (!MaskedValueIsZero(XorLHS, Mask, 0, &I))
          ExtendAmt = 0;
      }

      if (ExtendAmt) {
        Constant *ShAmt = ConstantInt::get(Ty, ExtendAmt);
        Value *NewShl = Builder.CreateShl(XorLHS, ShAmt, "sext");
        return BinaryOperator::CreateAShr(NewShl, ShAmt);
      }
      // If this is a xor that was canonicalized from a sub, turn it back into
      // a sub and fuse this add with it.
      if (LHS->hasOneUse() && (XorRHS->getValue() + 1).isPowerOf2()) {
        KnownBits LHSKnown = computeKnownBits(XorLHS, 0, &I);
        if ((XorRHS->getValue() | LHSKnown.Zero).isAllOnesValue())
          return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
                                           XorLHS);
      }
      // (X + signmask) + C could have gotten canonicalized to (X^signmask) + C;
      // transform them into (X + (signmask ^ C)).
      if (XorRHS->getValue().isSignMask())
        return BinaryOperator::CreateAdd(XorLHS,
                                         ConstantExpr::getXor(XorRHS, CI));
    }
  }

  if (Ty->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(LHS, RHS);

  // X + X --> X << 1
  if (LHS == RHS) {
    auto *Shl = BinaryOperator::CreateShl(LHS, ConstantInt::get(Ty, 1));
    Shl->setHasNoSignedWrap(I.hasNoSignedWrap());
    Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return Shl;
  }
  Value *A, *B;
  if (match(LHS, m_Neg(m_Value(A)))) {
    // -A + -B --> -(A + B)
    if (match(RHS, m_Neg(m_Value(B))))
      return BinaryOperator::CreateNeg(Builder.CreateAdd(A, B));

    // -A + B --> B - A
    return BinaryOperator::CreateSub(RHS, A);
  }

  // A + -B --> A - B
  if (match(RHS, m_Neg(m_Value(B))))
    return BinaryOperator::CreateSub(LHS, B);

  if (Value *V = checkForNegativeOperand(I, Builder))
    return replaceInstUsesWith(I, V);

  // (A + 1) + ~B --> A - B
  // ~B + (A + 1) --> A - B
  if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))))
    return BinaryOperator::CreateSub(A, B);

  // X % C0 + (( X / C0 ) % C1) * C0 => X % (C0 * C1)
  if (Value *V = SimplifyAddWithRemainder(I)) return replaceInstUsesWith(I, V);
  // A+B --> A|B iff A and B have no bits set in common.
  if (haveNoCommonBitsSet(LHS, RHS, DL, &AC, &I, &DT))
    return BinaryOperator::CreateOr(LHS, RHS);

  // FIXME: We already did a check for ConstantInt RHS above this.
  // FIXME: Is this pattern covered by another fold? No regression tests fail on
  // removal.
  if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
    // (X & FF00) + xx00 -> (X + xx00) & FF00
    Value *X;
    ConstantInt *C2;
    if (LHS->hasOneUse() &&
        match(LHS, m_And(m_Value(X), m_ConstantInt(C2))) &&
        CRHS->getValue() == (CRHS->getValue() & C2->getValue())) {
      // See if all bits from the first bit set in the Add RHS up are included
      // in the mask. First, get the rightmost bit.
      const APInt &AddRHSV = CRHS->getValue();

      // Form a mask of all bits from the lowest bit added through the top.
      APInt AddRHSHighBits(~((AddRHSV & -AddRHSV) - 1));

      // See if the and mask includes all of these bits.
      APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());

      if (AddRHSHighBits == AddRHSHighBitsAnd) {
        // Okay, the xform is safe. Insert the new add pronto.
        Value *NewAdd = Builder.CreateAdd(X, CRHS, LHS->getName());
        return BinaryOperator::CreateAnd(NewAdd, C2);
      }
    }
  }
  // add (select X 0 (sub n A)) A --> select X A n
  {
    SelectInst *SI = dyn_cast<SelectInst>(LHS);
    Value *A = RHS;
    if (!SI) {
      SI = dyn_cast<SelectInst>(RHS);
      A = LHS;
    }
    if (SI && SI->hasOneUse()) {
      Value *TV = SI->getTrueValue();
      Value *FV = SI->getFalseValue();
      Value *N;

      // Can we fold the add into the argument of the select?
      // We check both true and false select arguments for a matching subtract.
      if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the true select value.
        return SelectInst::Create(SI->getCondition(), N, A);

      if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the false select value.
        return SelectInst::Create(SI->getCondition(), A, N);
    }
  }
  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  // (add (xor A, B) (and A, B)) --> (or A, B)
  // (add (and A, B) (xor A, B)) --> (or A, B)
  if (match(&I, m_c_BinOp(m_Xor(m_Value(A), m_Value(B)),
                          m_c_And(m_Deferred(A), m_Deferred(B)))))
    return BinaryOperator::CreateOr(A, B);

  // (add (or A, B) (and A, B)) --> (add A, B)
  // (add (and A, B) (or A, B)) --> (add A, B)
  if (match(&I, m_c_BinOp(m_Or(m_Value(A), m_Value(B)),
                          m_c_And(m_Deferred(A), m_Deferred(B))))) {
    I.setOperand(0, A);
    I.setOperand(1, B);
    return &I;
  }
  // TODO(jingyue): Consider willNotOverflowSignedAdd and
  // willNotOverflowUnsignedAdd to reduce the number of invocations of
  // computeKnownBits.
  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  if (Instruction *V = canonicalizeLowbitMask(I, Builder))
    return V;

  return Changed ? &I : nullptr;
}
/// Factor a common operand out of fadd/fsub of fmul/fdiv.
static Instruction *factorizeFAddFSub(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  assert((I.getOpcode() == Instruction::FAdd ||
          I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub");
  assert(I.hasAllowReassoc() && I.hasNoSignedZeros() &&
         "FP factorization requires FMF");
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X, *Y, *Z;
  bool IsFMul;
  if ((match(Op0, m_OneUse(m_FMul(m_Value(X), m_Value(Z)))) &&
       match(Op1, m_OneUse(m_c_FMul(m_Value(Y), m_Specific(Z))))) ||
      (match(Op0, m_OneUse(m_FMul(m_Value(Z), m_Value(X)))) &&
       match(Op1, m_OneUse(m_c_FMul(m_Value(Y), m_Specific(Z))))))
    IsFMul = true;
  else if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Z)))) &&
           match(Op1, m_OneUse(m_FDiv(m_Value(Y), m_Specific(Z)))))
    IsFMul = false;
  else
    return nullptr;

  // (X * Z) + (Y * Z) --> (X + Y) * Z
  // (X * Z) - (Y * Z) --> (X - Y) * Z
  // (X / Z) + (Y / Z) --> (X + Y) / Z
  // (X / Z) - (Y / Z) --> (X - Y) / Z
  bool IsFAdd = I.getOpcode() == Instruction::FAdd;
  Value *XY = IsFAdd ? Builder.CreateFAddFMF(X, Y, &I)
                     : Builder.CreateFSubFMF(X, Y, &I);

  // Bail out if we just created a denormal constant.
  // TODO: This is copied from a previous implementation. Is it necessary?
  const APFloat *C;
  if (match(XY, m_APFloat(C)) && !C->isNormal())
    return nullptr;

  return IsFMul ? BinaryOperator::CreateFMulFMF(XY, Z, &I)
                : BinaryOperator::CreateFDivFMF(XY, Z, &I);
}
Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
  if (Value *V = SimplifyFAddInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *FoldedFAdd = foldBinOpIntoSelectOrPhi(I))
    return FoldedFAdd;

  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *X;
  // (-X) + Y --> Y - X
  if (match(LHS, m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFSubFMF(RHS, X, &I);
  // Y + (-X) --> Y - X
  if (match(RHS, m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFSubFMF(LHS, X, &I);
  // Check for (fadd double (sitofp x), y), see if we can merge this into an
  // integer add followed by a promotion.
  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
    Value *LHSIntVal = LHSConv->getOperand(0);
    Type *FPType = LHSConv->getType();

    // TODO: This check is overly conservative. In many cases known-bits
    // analysis can tell us that the result of the addition has fewer
    // significant bits than the integer type can hold.
    auto IsValidPromotion = [](Type *FTy, Type *ITy) {
      Type *FScalarTy = FTy->getScalarType();
      Type *IScalarTy = ITy->getScalarType();

      // Do we have enough bits in the significand to represent the result of
      // the integer addition?
      unsigned MaxRepresentableBits =
          APFloat::semanticsPrecision(FScalarTy->getFltSemantics());
      return IScalarTy->getIntegerBitWidth() <= MaxRepresentableBits;
    };
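
    // For example, "fadd double (sitofp i16 %x), 1.0" can become
    // "sitofp (add nsw i16 %x, 1) to double" when the integer add provably
    // cannot overflow: a double's 53-bit significand represents every such
    // sum exactly.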
    // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
    // ... if the constant fits in the integer value. This is useful for things
    // like (double)(x & 1234) + 4.0 -> (double)((X & 1234) + 4) which no longer
    // requires a constant pool load, and generally allows the add to be better
    // instcombined.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
        Constant *CI =
          ConstantExpr::getFPToSI(CFP, LHSIntVal->getType());
        if (LHSConv->hasOneUse() &&
            ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
            willNotOverflowSignedAdd(LHSIntVal, CI, I)) {
          // Insert the new integer add.
          Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, CI, "addconv");
          return new SIToFPInst(NewAdd, I.getType());
        }
      }
    // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
    if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
      Value *RHSIntVal = RHSConv->getOperand(0);
      // It's enough to check LHS types only because we require int types to
      // be the same for this transform.
      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
        // Only do this if x/y have the same type, if at least one of them has a
        // single use (so we don't increase the number of int->fp conversions),
        // and if the integer add will not overflow.
        if (LHSIntVal->getType() == RHSIntVal->getType() &&
            (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
            willNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
          // Insert the new integer add.
          Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, RHSIntVal, "addconv");
          return new SIToFPInst(NewAdd, I.getType());
        }
      }
    }
  }
  // Handle special cases for FAdd with selects feeding the operation.
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
    if (Instruction *F = factorizeFAddFSub(I, Builder))
      return F;
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return nullptr;
}
/// Optimize pointer differences (of pointers into the same array) into a
/// size. Consider: &A[10] - &A[0]: we should compile this to "10". LHS/RHS
/// are the pointer operands to the ptrtoint instructions for the LHS/RHS of
/// the subtract.
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
                                               Type *Ty) {
  // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
  // this.
  bool Swapped = false;
  GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;

  // For now we require one side to be the base pointer "A" or a constant
  // GEP derived from it.
  if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
    // (gep X, ...) - X
    if (LHSGEP->getOperand(0) == RHS) {
      GEP1 = LHSGEP;
      Swapped = false;
    } else if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
      // (gep X, ...) - (gep X, ...)
      if (LHSGEP->getOperand(0)->stripPointerCasts() ==
          RHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = RHSGEP;
        GEP1 = LHSGEP;
        Swapped = false;
      }
    }
  }

  if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
    // X - (gep X, ...)
    if (RHSGEP->getOperand(0) == LHS) {
      GEP1 = RHSGEP;
      Swapped = true;
    } else if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
      // (gep X, ...) - (gep X, ...)
      if (RHSGEP->getOperand(0)->stripPointerCasts() ==
          LHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = LHSGEP;
        GEP1 = RHSGEP;
        Swapped = true;
      }
    }
  }
  if (!GEP1)
    return nullptr;

  if (GEP2) {
    // (gep X, ...) - (gep X, ...)
    //
    // Avoid duplicating the arithmetic if there is more than one non-constant
    // index between the two GEPs and either GEP has a non-constant index and
    // multiple users. If there are zero non-constant indices, the result is a
    // constant and there is no duplication. If there is one non-constant
    // index, the result is an add or sub with a constant, which is no larger
    // than the original code, and there's no duplicated arithmetic, even if
    // either GEP has multiple users. If more than one non-constant index is
    // combined, then as long as the GEP with at least one non-constant index
    // doesn't have multiple users, there is no duplication.
    unsigned NumNonConstantIndices1 = GEP1->countNonConstantIndices();
    unsigned NumNonConstantIndices2 = GEP2->countNonConstantIndices();
    if (NumNonConstantIndices1 + NumNonConstantIndices2 > 1 &&
        ((NumNonConstantIndices1 > 0 && !GEP1->hasOneUse()) ||
         (NumNonConstantIndices2 > 0 && !GEP2->hasOneUse()))) {
      return nullptr;
    }
  }
  // Emit the offset of the GEP as an intptr_t.
  Value *Result = EmitGEPOffset(GEP1);

  // If we had a constant expression GEP on the other side offsetting the
  // pointer, subtract it from the offset we have.
  if (GEP2) {
    Value *Offset = EmitGEPOffset(GEP2);
    Result = Builder.CreateSub(Result, Offset);
  }

  // If we have p - gep(p, ...) then we have to negate the result.
  if (Swapped)
    Result = Builder.CreateNeg(Result, "diff.neg");

  return Builder.CreateIntCast(Result, Ty, true);
}
Instruction *InstCombiner::visitSub(BinaryOperator &I) {
  if (Value *V = SimplifySubInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // (A*B)-(A*C) -> A*(B-C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // If this is 'B = x - (-A)', change it to B = x + A.
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (Value *V = dyn_castNegVal(Op1)) {
    BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);

    if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
      assert(BO->getOpcode() == Instruction::Sub &&
             "Expected a subtraction operator!");
      if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    } else {
      if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    }

    return Res;
  }
  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(Op0, Op1);

  // Replace (-1 - A) with (~A).
  if (match(Op0, m_AllOnes()))
    return BinaryOperator::CreateNot(Op1);

  // (~X) - (~Y) --> Y - X
  Value *X, *Y;
  if (match(Op0, m_Not(m_Value(X))) && match(Op1, m_Not(m_Value(Y))))
    return BinaryOperator::CreateSub(Y, X);

  // (X + -1) - Y --> ~Y + X
  if (match(Op0, m_OneUse(m_Add(m_Value(X), m_AllOnes()))))
    return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X);

  // Y - (X + 1) --> ~X + Y
  if (match(Op1, m_OneUse(m_Add(m_Value(X), m_One()))))
    return BinaryOperator::CreateAdd(Builder.CreateNot(X), Op0);
  if (Constant *C = dyn_cast<Constant>(Op0)) {
    bool IsNegate = match(C, m_ZeroInt());
    if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
      // 0 - (zext bool) --> sext bool
      // C - (zext bool) --> bool ? C - 1 : C
      if (IsNegate)
        return CastInst::CreateSExtOrBitCast(X, I.getType());
      return SelectInst::Create(X, SubOne(C), C);
    }
    if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
      // 0 - (sext bool) --> zext bool
      // C - (sext bool) --> bool ? C + 1 : C
      if (IsNegate)
        return CastInst::CreateZExtOrBitCast(X, I.getType());
      return SelectInst::Create(X, AddOne(C), C);
    }

    // C - ~X == X + (1 + C)
    if (match(Op1, m_Not(m_Value(X))))
      return BinaryOperator::CreateAdd(X, AddOne(C));

    // Try to fold constant sub into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

    // Try to fold constant sub into PHI values.
    if (PHINode *PN = dyn_cast<PHINode>(Op1))
      if (Instruction *R = foldOpIntoPhi(I, PN))
        return R;

    Constant *C2;

    // C - (X + C2) --> (C - C2) - X
    if (match(Op1, m_Add(m_Value(X), m_Constant(C2))))
      return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);
  }
  const APInt *Op0C;
  if (match(Op0, m_APInt(Op0C))) {
    unsigned BitWidth = I.getType()->getScalarSizeInBits();

    // -(X >>u 31) -> (X >>s 31)
    // -(X >>s 31) -> (X >>u 31)
    if (Op0C->isNullValue()) {
      const APInt *ShAmt;
      if (match(Op1, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
          *ShAmt == BitWidth - 1) {
        Value *ShAmtOp = cast<Instruction>(Op1)->getOperand(1);
        return BinaryOperator::CreateAShr(X, ShAmtOp);
      }
      if (match(Op1, m_AShr(m_Value(X), m_APInt(ShAmt))) &&
          *ShAmt == BitWidth - 1) {
        Value *ShAmtOp = cast<Instruction>(Op1)->getOperand(1);
        return BinaryOperator::CreateLShr(X, ShAmtOp);
      }
      if (Op1->hasOneUse()) {
        Value *LHS, *RHS;
        SelectPatternFlavor SPF = matchSelectPattern(Op1, LHS, RHS).Flavor;
        if (SPF == SPF_ABS || SPF == SPF_NABS) {
          // This is a negate of an ABS/NABS pattern. Just swap the operands
          // of the select.
          SelectInst *SI = cast<SelectInst>(Op1);
          Value *TrueVal = SI->getTrueValue();
          Value *FalseVal = SI->getFalseValue();
          SI->setTrueValue(FalseVal);
          SI->setFalseValue(TrueVal);
          // Don't swap prof metadata, we didn't change the branch behavior.
          return replaceInstUsesWith(I, SI);
        }
      }
    }
    // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
    // zero.
    if (Op0C->isMask()) {
      KnownBits RHSKnown = computeKnownBits(Op1, 0, &I);
      if ((*Op0C | RHSKnown.Zero).isAllOnesValue())
        return BinaryOperator::CreateXor(Op1, Op0);
    }
  }
  // X - (X + Y) == -Y    X - (Y + X) == -Y
  if (match(Op1, m_c_Add(m_Specific(Op0), m_Value(Y))))
    return BinaryOperator::CreateNeg(Y);

  // (X - Y) - X == -Y
  if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNeg(Y);

  // (sub (or A, B), (xor A, B)) --> (and A, B)
  {
    Value *A, *B;
    if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);
  }

  // ((X | Y) - X) --> (~X & Y)
  if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
    return BinaryOperator::CreateAnd(
        Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
  if (Op1->hasOneUse()) {
    Value *X = nullptr, *Y = nullptr, *Z = nullptr;
    Constant *C = nullptr;

    // (X - (Y - Z)) --> (X + (Z - Y)).
    if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
      return BinaryOperator::CreateAdd(Op0,
                                       Builder.CreateSub(Z, Y, Op1->getName()));

    // (X - (X & Y)) --> (X & ~Y)
    if (match(Op1, m_c_And(m_Value(Y), m_Specific(Op0))))
      return BinaryOperator::CreateAnd(Op0,
                                  Builder.CreateNot(Y, Y->getName() + ".not"));
    // 0 - (X sdiv C) -> (X sdiv -C)  provided the negation doesn't overflow.
    if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) && match(Op0, m_Zero()) &&
        C->isNotMinSignedValue() && !C->isOneValue())
      return BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C));

    // 0 - (X << Y) -> (-X << Y)  when X is freely negatable.
    if (match(Op1, m_Shl(m_Value(X), m_Value(Y))) && match(Op0, m_Zero()))
      if (Value *XNeg = dyn_castNegVal(X))
        return BinaryOperator::CreateShl(XNeg, Y);
    // Subtracting -1/0 is the same as adding 1/0:
    // sub [nsw] Op0, sext(bool Y) -> add [nsw] Op0, zext(bool Y)
    // 'nuw' is dropped in favor of the canonical form.
    if (match(Op1, m_SExt(m_Value(Y))) &&
        Y->getType()->getScalarSizeInBits() == 1) {
      Value *Zext = Builder.CreateZExt(Y, I.getType());
      BinaryOperator *Add = BinaryOperator::CreateAdd(Op0, Zext);
      Add->setHasNoSignedWrap(I.hasNoSignedWrap());
      return Add;
    }

    // X - A*-B -> X + A*B
    // X - -A*B -> X + A*B
    Value *A, *B;
    if (match(Op1, m_c_Mul(m_Value(A), m_Neg(m_Value(B)))))
      return BinaryOperator::CreateAdd(Op0, Builder.CreateMul(A, B));

    // X - A*C -> X + A*-C
    // No need to handle the commuted multiply because multiply handling will
    // ensure the constant is moved to the right-hand side.
    if (match(Op1, m_Mul(m_Value(A), m_Constant(C))) && !isa<ConstantExpr>(C)) {
      Value *NewMul = Builder.CreateMul(A, ConstantExpr::getNeg(C));
      return BinaryOperator::CreateAdd(Op0, NewMul);
    }
  }
  {
    // ~A - Min/Max(~A, O) -> Max/Min(A, ~O) - A
    // ~A - Min/Max(O, ~A) -> Max/Min(A, ~O) - A
    // Min/Max(~A, O) - ~A -> A - Max/Min(A, ~O)
    // Min/Max(O, ~A) - ~A -> A - Max/Min(A, ~O)
    // So long as O here is freely invertible, this will be neutral or a win.
    Value *LHS, *RHS, *A;
    Value *NotA = Op0, *MinMax = Op1;
    SelectPatternFlavor SPF = matchSelectPattern(MinMax, LHS, RHS).Flavor;
    if (!SelectPatternResult::isMinOrMax(SPF)) {
      NotA = Op1;
      MinMax = Op0;
      SPF = matchSelectPattern(MinMax, LHS, RHS).Flavor;
    }
    if (SelectPatternResult::isMinOrMax(SPF) &&
        match(NotA, m_Not(m_Value(A))) && (NotA == LHS || NotA == RHS)) {
      if (NotA == LHS)
        std::swap(LHS, RHS);
      // LHS is now O above and expected to have at least 2 uses (the min/max);
      // NotA is expected to have 2 uses from the min/max and 1 from the sub.
      if (IsFreeToInvert(LHS, !LHS->hasNUsesOrMore(3)) &&
          !NotA->hasNUsesOrMore(4)) {
        // Note: We don't generate the inverse max/min, just create the not of
        // it and let other folds do the rest.
        Value *Not = Builder.CreateNot(MinMax);
        if (NotA == Op0)
          return BinaryOperator::CreateSub(Not, A);
        else
          return BinaryOperator::CreateSub(A, Not);
      }
    }
  }
  // Optimize pointer differences into the same array into a size. Consider:
  //   &A[10] - &A[0]: we should compile this to "10".
  Value *LHSOp, *RHSOp;
  if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
      match(Op1, m_PtrToInt(m_Value(RHSOp))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
      return replaceInstUsesWith(I, Res);

  // trunc(p)-trunc(q) -> trunc(p-q)
  if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
      match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
      return replaceInstUsesWith(I, Res);
  // Canonicalize a shifty way to code absolute value to the common pattern.
  // There are 2 potential commuted variants.
  // We're relying on the fact that we only do this transform when the shift has
  // exactly 2 uses and the xor has exactly 1 use (otherwise, we might increase
  // instruction count) and that the shift amount is bitwidth - 1.
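  //
  // For instance, the canonicalization below turns:
  //   %b = ashr i32 %a, 31
  //   %x = xor i32 %a, %b
  //   %s = sub i32 %x, %b
  // into "select (icmp slt %a, 0), (sub 0, %a), %a".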
  Value *A;
  const APInt *ShAmt;
  Type *Ty = I.getType();
  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
      Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
      match(Op0, m_OneUse(m_c_Xor(m_Specific(A), m_Specific(Op1))))) {
    // B = ashr i32 A, 31    ; smear the sign bit
    // sub (xor A, B), B     ; flip bits if negative and subtract -1 (add 1)
    // --> (A < 0) ? -A : A
    Value *Cmp = Builder.CreateICmpSLT(A, ConstantInt::getNullValue(Ty));
    // Copy the nuw/nsw flags from the sub to the negate.
    Value *Neg = Builder.CreateNeg(A, "", I.hasNoUnsignedWrap(),
                                   I.hasNoSignedWrap());
    return SelectInst::Create(Cmp, Neg, A);
  }
  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}
Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
  if (Value *V = SimplifyFSubInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Subtraction from -0.0 is the canonical form of fneg.
  // fsub nsz 0, X ==> fsub nsz -0.0, X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (I.hasNoSignedZeros() && match(Op0, m_PosZeroFP()))
    return BinaryOperator::CreateFNegFMF(Op1, &I);

  Value *X, *Y;
  Constant *C;
  // Fold negation into a constant operand. This is limited with one-use
  // because fneg is assumed better for analysis and cheaper in codegen than
  // fmul/fdiv.
  // -(X * C) --> X * (-C)
  if (match(&I, m_FNeg(m_OneUse(m_FMul(m_Value(X), m_Constant(C))))))
    return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);
  // -(X / C) --> X / (-C)
  if (match(&I, m_FNeg(m_OneUse(m_FDiv(m_Value(X), m_Constant(C))))))
    return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);
  // -(C / X) --> (-C) / X
  if (match(&I, m_FNeg(m_OneUse(m_FDiv(m_Constant(C), m_Value(X))))))
    return BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);
  // If Op0 is not -0.0 or we can ignore -0.0: Z - (X - Y) --> Z + (Y - X)
  // Canonicalize to fadd to make analysis easier.
  // This can also help codegen because fadd is commutative.
  // Note that if this fsub was really an fneg, the fadd with -0.0 will get
  // killed later. We still limit that particular transform with 'hasOneUse'
  // because an fneg is assumed better/cheaper than a generic fsub.
  if (I.hasNoSignedZeros() || CannotBeNegativeZero(Op0, SQ.TLI)) {
    if (match(Op1, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
      Value *NewSub = Builder.CreateFSubFMF(Y, X, &I);
      return BinaryOperator::CreateFAddFMF(Op0, NewSub, &I);
    }
  }
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *NV = FoldOpIntoSelect(I, SI))
        return NV;

  // X - C --> X + (-C)
  // But don't transform constant expressions because there's an inverse fold
  // for X + (-Y) --> X - Y.
  if (match(Op1, m_Constant(C)) && !isa<ConstantExpr>(Op1))
    return BinaryOperator::CreateFAddFMF(Op0, ConstantExpr::getFNeg(C), &I);

  // X - (-Y) --> X + Y
  if (match(Op1, m_FNeg(m_Value(Y))))
    return BinaryOperator::CreateFAddFMF(Op0, Y, &I);
  // Similar to above, but look through a cast of the negated value:
  // X - (fptrunc(-Y)) --> X + fptrunc(Y)
  Type *Ty = I.getType();
  if (match(Op1, m_OneUse(m_FPTrunc(m_FNeg(m_Value(Y))))))
    return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPTrunc(Y, Ty), &I);

  // X - (fpext(-Y)) --> X + fpext(Y)
  if (match(Op1, m_OneUse(m_FPExt(m_FNeg(m_Value(Y))))))
    return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPExt(Y, Ty), &I);

  // Handle special cases for FSub with selects feeding the operation.
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);
  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
    // (Y - X) - Y --> -X
    if (match(Op0, m_FSub(m_Specific(Op1), m_Value(X))))
      return BinaryOperator::CreateFNegFMF(X, &I);

    // Y - (X + Y) --> -X
    // Y - (Y + X) --> -X
    if (match(Op1, m_c_FAdd(m_Specific(Op0), m_Value(X))))
      return BinaryOperator::CreateFNegFMF(X, &I);

    // (X * C) - X --> X * (C - 1.0)
    if (match(Op0, m_FMul(m_Specific(Op1), m_Constant(C)))) {
      Constant *CSubOne = ConstantExpr::getFSub(C, ConstantFP::get(Ty, 1.0));
      return BinaryOperator::CreateFMulFMF(Op1, CSubOne, &I);
    }
    // X - (X * C) --> X * (1.0 - C)
    if (match(Op1, m_FMul(m_Specific(Op0), m_Constant(C)))) {
      Constant *OneSubC = ConstantExpr::getFSub(ConstantFP::get(Ty, 1.0), C);
      return BinaryOperator::CreateFMulFMF(Op0, OneSubC, &I);
    }
    if (Instruction *F = factorizeFAddFSub(I, Builder))
      return F;

    // TODO: This performs reassociative folds for FP ops. Some fraction of the
    // functionality has been subsumed by simple pattern matching here and in
    // InstSimplify. We should let a dedicated reassociation pass handle more
    // complex pattern matching and remove this from InstCombine.
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return nullptr;
}