//===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for add, fadd, sub, and fsub.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

namespace {

  /// Class representing coefficient of floating-point addend.
  /// This class needs to be highly efficient, which is especially true for
  /// the constructor. As I write this comment, the cost of the default
  /// constructor is merely 4-byte-store-zero (assuming the compiler is able
  /// to perform write-merging).
  ///
  class FAddendCoef {
  public:
    // The constructor has to initialize an APFloat, which is unnecessary for
    // most addends which have coefficient either 1 or -1. So, the constructor
    // is expensive. In order to avoid the cost of the constructor, we should
    // reuse some instances whenever possible. The pre-created instances
    // FAddCombine::Add[0-5] embody this idea.
    //
    FAddendCoef() : IsFp(false), BufHasFpVal(false), IntVal(0) {}
    ~FAddendCoef();

    void set(short C) {
      assert(!insaneIntVal(C) && "Insane coefficient");
      IsFp = false; IntVal = C;
    }

    void set(const APFloat& C);

    void negate();

    bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
    Value *getValue(Type *) const;

    // If possible, don't define operator+/operator- etc because these
    // operators inevitably call FAddendCoef's constructor which is not cheap.
    void operator=(const FAddendCoef &A);
    void operator+=(const FAddendCoef &A);
    void operator*=(const FAddendCoef &S);

    bool isOne() const { return isInt() && IntVal == 1; }
    bool isTwo() const { return isInt() && IntVal == 2; }
    bool isMinusOne() const { return isInt() && IntVal == -1; }
    bool isMinusTwo() const { return isInt() && IntVal == -2; }

  private:
    bool insaneIntVal(int V) { return V > 4 || V < -4; }
    APFloat *getFpValPtr()
      { return reinterpret_cast<APFloat*>(&FpValBuf.buffer[0]); }
    const APFloat *getFpValPtr() const
      { return reinterpret_cast<const APFloat*>(&FpValBuf.buffer[0]); }

    const APFloat &getFpVal() const {
      assert(IsFp && BufHasFpVal && "Incorrect state");
      return *getFpValPtr();
    }

    APFloat &getFpVal() {
      assert(IsFp && BufHasFpVal && "Incorrect state");
      return *getFpValPtr();
    }

    bool isInt() const { return !IsFp; }

    // If the coefficient is represented by an integer, promote it to a
    // floating point.
    void convertToFpType(const fltSemantics &Sem);

    // Construct an APFloat from a signed integer.
    // TODO: We should get rid of this function when APFloat can be constructed
    // from a *SIGNED* integer.
    APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);

  private:
    bool IsFp;

    // True iff FpValBuf contains an instance of APFloat.
    bool BufHasFpVal;

    // The integer coefficient of an individual addend is either 1 or -1,
    // and we try to simplify at most 4 addends from at most two neighboring
    // instructions. So the range of <IntVal> falls in [-4, 4]. APInt is
    // overkill for this purpose.
    short IntVal;

    AlignedCharArrayUnion<APFloat> FpValBuf;
  };

  /// FAddend is used to represent a floating-point addend. An addend is
  /// represented as <C, V>, where V is a symbolic value, and C is a
  /// constant coefficient. A constant addend is represented as <C, 0>.
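  /// For illustration, the product "4.2 * x" is captured as the addend
  /// <4.2, x>, and the lone constant 4.2 as the addend <4.2, 0>.
  ///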
  class FAddend {
  public:
    FAddend() : Val(nullptr) {}

    Value *getSymVal() const { return Val; }
    const FAddendCoef &getCoef() const { return Coeff; }

    bool isConstant() const { return Val == nullptr; }
    bool isZero() const { return Coeff.isZero(); }

    void set(short Coefficient, Value *V) {
      Coeff.set(Coefficient);
      Val = V;
    }

    void set(const APFloat &Coefficient, Value *V) {
      Coeff.set(Coefficient);
      Val = V;
    }

    void set(const ConstantFP *Coefficient, Value *V) {
      Coeff.set(Coefficient->getValueAPF());
      Val = V;
    }

    void negate() { Coeff.negate(); }

    /// Drill down the U-D chain one step to find the definition of V, and
    /// try to break the definition into one or two addends.
    static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);

    /// Similar to FAddend::drillValueDownOneStep() except that the value
    /// being split is the addend itself.
    unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;

    void operator+=(const FAddend &T) {
      assert((Val == T.Val) && "Symbolic-values disagree");
      Coeff += T.Coeff;
    }

  private:
    void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }

    // This addend has the value of "Coeff * Val".
    Value *Val;
    FAddendCoef Coeff;
  };

  /// FAddCombine is the class for optimizing an unsafe fadd/fsub along
  /// with its at most two neighboring instructions.
  ///
  class FAddCombine {
  public:
    FAddCombine(InstCombiner::BuilderTy *B) : Builder(B), Instr(nullptr) {}

    Value *simplify(Instruction *FAdd);

  private:
    typedef SmallVector<const FAddend*, 4> AddendVect;

    Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);

    Value *performFactorization(Instruction *I);

    /// Convert given addend to a Value
    Value *createAddendVal(const FAddend &A, bool& NeedNeg);

    /// Return the number of instructions needed to emit the N-ary addition.
    unsigned calcInstrNumber(const AddendVect& Vect);

    Value *createFSub(Value *Opnd0, Value *Opnd1);
    Value *createFAdd(Value *Opnd0, Value *Opnd1);
    Value *createFMul(Value *Opnd0, Value *Opnd1);
    Value *createFDiv(Value *Opnd0, Value *Opnd1);
    Value *createFNeg(Value *V);
    Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
    void createInstPostProc(Instruction *NewInst, bool NoNumber = false);

    InstCombiner::BuilderTy *Builder;
    Instruction *Instr;

    // Debugging stuff is clustered here.
#ifndef NDEBUG
    unsigned CreateInstrNum;
    void initCreateInstNum() { CreateInstrNum = 0; }
    void incCreateInstNum() { CreateInstrNum++; }
#else
    void initCreateInstNum() {}
    void incCreateInstNum() {}
#endif
  };

} // anonymous namespace

//===----------------------------------------------------------------------===//
//
// Implementation of
//    {FAddendCoef, FAddend, FAddition, FAddCombine}.
//
//===----------------------------------------------------------------------===//

FAddendCoef::~FAddendCoef() {
  if (BufHasFpVal)
    getFpValPtr()->~APFloat();
}

void FAddendCoef::set(const APFloat& C) {
  APFloat *P = getFpValPtr();

  if (isInt() || !BufHasFpVal) {
    // As the buffer is a meaningless byte stream, we cannot call
    // APFloat::operator=().
    new(P) APFloat(C);
  } else
    *P = C;

  IsFp = BufHasFpVal = true;
}

void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
  if (!isInt())
    return;

  APFloat *P = getFpValPtr();
  if (IntVal > 0)
    new(P) APFloat(Sem, IntVal);
  else {
    new(P) APFloat(Sem, 0 - IntVal);
    P->changeSign();
  }
  IsFp = BufHasFpVal = true;
}

APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
  if (Val >= 0)
    return APFloat(Sem, Val);

  APFloat T(Sem, 0 - Val);
  T.changeSign();

  return T;
}

void FAddendCoef::operator=(const FAddendCoef &That) {
  if (That.isInt())
    set(That.IntVal);
  else
    set(That.getFpVal());
}

void FAddendCoef::operator+=(const FAddendCoef &That) {
  enum APFloat::roundingMode RndMode = APFloat::rmNearestTiesToEven;
  if (isInt() == That.isInt()) {
    if (isInt())
      IntVal += That.IntVal;
    else
      getFpVal().add(That.getFpVal(), RndMode);
    return;
  }

  if (isInt()) {
    const APFloat &T = That.getFpVal();
    convertToFpType(T.getSemantics());
    getFpVal().add(T, RndMode);
    return;
  }

  APFloat &T = getFpVal();
  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
}

void FAddendCoef::operator*=(const FAddendCoef &That) {
  if (That.isOne())
    return;

  if (That.isMinusOne()) {
    negate();
    return;
  }

  if (isInt() && That.isInt()) {
    int Res = IntVal * (int)That.IntVal;
    assert(!insaneIntVal(Res) && "Insane int value");
    IntVal = Res;
    return;
  }

  const fltSemantics &Semantic =
    isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();

  if (isInt())
    convertToFpType(Semantic);
  APFloat &F0 = getFpVal();

  if (That.isInt())
    F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
                APFloat::rmNearestTiesToEven);
  else
    F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
}

void FAddendCoef::negate() {
  if (isInt())
    IntVal = 0 - IntVal;
  else
    getFpVal().changeSign();
}

Value *FAddendCoef::getValue(Type *Ty) const {
  return isInt() ?
    ConstantFP::get(Ty, float(IntVal)) :
    ConstantFP::get(Ty->getContext(), getFpVal());
}

// The definition of <Val>     Addends
// =========================================
//  A + B                     <1, A>, <1, B>
//  A - B                     <1, A>, <-1, B>
//  0 - B                     <-1, B>
//  C * A                     <C, A>
//  A + C                     <1, A>, <C, NULL>
//  0 +/- 0                   <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is constant
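//
// For illustration, drilling "%v = fsub float %a, %b" down one step yields
// the addends <1, %a> and <-1, %b>; the FSub opcode is what negates the
// second addend.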
unsigned FAddend::drillValueDownOneStep
  (Value *Val, FAddend &Addend0, FAddend &Addend1) {
  Instruction *I = nullptr;
  if (!Val || !(I = dyn_cast<Instruction>(Val)))
    return 0;

  unsigned Opcode = I->getOpcode();

  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
    ConstantFP *C0, *C1;
    Value *Opnd0 = I->getOperand(0);
    Value *Opnd1 = I->getOperand(1);
    if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
      Opnd0 = nullptr;

    if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
      Opnd1 = nullptr;

    if (Opnd0) {
      if (!C0)
        Addend0.set(1, Opnd0);
      else
        Addend0.set(C0, nullptr);
    }

    if (Opnd1) {
      FAddend &Addend = Opnd0 ? Addend1 : Addend0;
      if (!C1)
        Addend.set(1, Opnd1);
      else
        Addend.set(C1, nullptr);
      if (Opcode == Instruction::FSub)
        Addend.negate();
    }

    if (Opnd0 || Opnd1)
      return Opnd0 && Opnd1 ? 2 : 1;

    // Both operands are zero. Weird!
    Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
    return 1;
  }

  if (I->getOpcode() == Instruction::FMul) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
      Addend0.set(C, V1);
      return 1;
    }

    if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
      Addend0.set(C, V0);
      return 1;
    }
  }

  return 0;
}

// Try to break *this* addend into two addends. e.g. Suppose this addend is
// <2.3, V>, and V = X + Y; by calling this function, we obtain two addends,
// i.e. <2.3, X> and <2.3, Y>.
//
unsigned FAddend::drillAddendDownOneStep
  (FAddend &Addend0, FAddend &Addend1) const {
  if (isConstant())
    return 0;

  unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
  if (!BreakNum || Coeff.isOne())
    return BreakNum;

  Addend0.Scale(Coeff);

  if (BreakNum == 2)
    Addend1.Scale(Coeff);

  return BreakNum;
}

// Try to perform the following optimization on the input instruction I. Return
// the simplified expression if it was successful; otherwise, return nullptr.
//
//   Instruction "I" is                Simplified into
// -------------------------------------------------------
//   (x * y) +/- (x * z)               x * (y +/- z)
//   (y / x) +/- (z / x)               (y +/- z) / x
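//
// For illustration (value names are hypothetical), under fast-math this turns
//   %t0 = fmul fast float %x, %y
//   %t1 = fmul fast float %x, %z
//   %r  = fadd fast float %t0, %t1
// into
//   %s  = fadd fast float %y, %z
//   %r  = fmul fast float %x, %s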
Value *FAddCombine::performFactorization(Instruction *I) {
  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  Instruction *I0 = dyn_cast<Instruction>(I->getOperand(0));
  Instruction *I1 = dyn_cast<Instruction>(I->getOperand(1));

  if (!I0 || !I1 || I0->getOpcode() != I1->getOpcode())
    return nullptr;

  bool isMpy = false;
  if (I0->getOpcode() == Instruction::FMul)
    isMpy = true;
  else if (I0->getOpcode() != Instruction::FDiv)
    return nullptr;

  Value *Opnd0_0 = I0->getOperand(0);
  Value *Opnd0_1 = I0->getOperand(1);
  Value *Opnd1_0 = I1->getOperand(0);
  Value *Opnd1_1 = I1->getOperand(1);

  //  Input Instr I       Factor   AddSub0  AddSub1
  //  ----------------------------------------------
  // (x*y) +/- (x*z)        x        y         z
  // (y/x) +/- (z/x)        x        y         z
  //
  Value *Factor = nullptr;
  Value *AddSub0 = nullptr, *AddSub1 = nullptr;

  if (isMpy) {
    if (Opnd0_0 == Opnd1_0 || Opnd0_0 == Opnd1_1)
      Factor = Opnd0_0;
    else if (Opnd0_1 == Opnd1_0 || Opnd0_1 == Opnd1_1)
      Factor = Opnd0_1;

    if (Factor) {
      AddSub0 = (Factor == Opnd0_0) ? Opnd0_1 : Opnd0_0;
      AddSub1 = (Factor == Opnd1_0) ? Opnd1_1 : Opnd1_0;
    }
  } else if (Opnd0_1 == Opnd1_1) {
    Factor = Opnd0_1;
    AddSub0 = Opnd0_0;
    AddSub1 = Opnd1_0;
  }

  if (!Factor)
    return nullptr;

  FastMathFlags Flags;
  Flags.setUnsafeAlgebra();
  if (I0) Flags &= I->getFastMathFlags();
  if (I1) Flags &= I->getFastMathFlags();

  // Create expression "NewAddSub = AddSub0 +/- AddSub1"
  Value *NewAddSub = (I->getOpcode() == Instruction::FAdd) ?
                      createFAdd(AddSub0, AddSub1) :
                      createFSub(AddSub0, AddSub1);
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(NewAddSub)) {
    const APFloat &F = CFP->getValueAPF();
    if (!F.isNormal())
      return nullptr;
  } else if (Instruction *II = dyn_cast<Instruction>(NewAddSub))
    II->setFastMathFlags(Flags);

  if (isMpy) {
    Value *RI = createFMul(Factor, NewAddSub);
    if (Instruction *II = dyn_cast<Instruction>(RI))
      II->setFastMathFlags(Flags);
    return RI;
  }

  Value *RI = createFDiv(NewAddSub, Factor);
  if (Instruction *II = dyn_cast<Instruction>(RI))
    II->setFastMathFlags(Flags);
  return RI;
}

Value *FAddCombine::simplify(Instruction *I) {
  assert(I->hasUnsafeAlgebra() && "Should be in unsafe mode");

  // Currently we are not able to handle vector type.
  if (I->getType()->isVectorTy())
    return nullptr;

  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  // Save the instruction before calling other member-functions.
  Instr = I;

  FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;

  unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);

  // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
  unsigned Opnd0_ExpNum = 0;
  unsigned Opnd1_ExpNum = 0;

  if (!Opnd0.isConstant())
    Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);

  // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
  if (OpndNum == 2 && !Opnd1.isConstant())
    Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);

  // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
  if (Opnd0_ExpNum && Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0_0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    // Compute instruction quota. We should save at least one instruction.
    unsigned InstQuota = 0;

    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
                 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;

    if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
      return R;
  }

  if (OpndNum != 2) {
    // The input instruction is : "I = 0.0 +/- V". If the "V" were able to be
    // split into two addends, say "V = X - Y", the instruction would have
    // been optimized into "I = Y - X" in the previous steps.
    //
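    // For illustration, "I = fadd fast float 0.0, %v" reaches this point with
    // OpndNum == 1 and Opnd0 == <1, %v>, so we return %v itself; the fsub
    // form yields coefficient -1 and returns nullptr.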
    const FAddendCoef &CE = Opnd0.getCoef();
    return CE.isOne() ? Opnd0.getSymVal() : nullptr;
  }

  // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
  if (Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
  if (Opnd0_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd1);
    AllOpnds.push_back(&Opnd0_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 6: Try factorization as the last resort.
  return performFactorization(I);
}

Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
  unsigned AddendNum = Addends.size();
  assert(AddendNum <= 4 && "Too many addends");

  // For saving intermediate results.
  unsigned NextTmpIdx = 0;
  FAddend TmpResult[3];

  // Points to the constant addend of the resulting simplified expression.
  // If the resulting expr has a constant addend, it is desirable for that
  // constant to reside at the top of the resulting expression tree. Placing
  // constants close to super-expr(s) will potentially reveal some optimization
  // opportunities in the super-expr(s).
  //
  const FAddend *ConstAdd = nullptr;

  // Simplified addends are placed in <SimpVect>.
  AddendVect SimpVect;

  // The outer loop works on one symbolic-value at a time. Suppose the input
  // addends are : <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
  // The symbolic-values will be processed in this order: x, y, z.
  //
  for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {

    const FAddend *ThisAddend = Addends[SymIdx];
    if (!ThisAddend) {
      // This addend was processed before.
      continue;
    }

    Value *Val = ThisAddend->getSymVal();
    unsigned StartIdx = SimpVect.size();
    SimpVect.push_back(ThisAddend);

    // The inner loop collects addends sharing the same symbolic value, and
    // these addends will later be folded into a single addend. Following the
    // above example, if the symbolic value "y" is being processed, the inner
    // loop will collect the two addends "<b1, y>" and "<b2, y>", which will
    // later be folded into "<b1+b2, y>".
    //
    for (unsigned SameSymIdx = SymIdx + 1;
         SameSymIdx < AddendNum; SameSymIdx++) {
      const FAddend *T = Addends[SameSymIdx];
      if (T && T->getSymVal() == Val) {
        // Set null such that the next iteration of the outer loop will not
        // process this addend again.
        Addends[SameSymIdx] = nullptr;
        SimpVect.push_back(T);
      }
    }

    // If multiple addends share the same symbolic value, fold them together.
    if (StartIdx + 1 != SimpVect.size()) {
      FAddend &R = TmpResult[NextTmpIdx ++];
      R = *SimpVect[StartIdx];
      for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
        R += *SimpVect[Idx];

      // Pop all addends being folded and push the resulting folded addend.
      SimpVect.resize(StartIdx);
      if (Val != nullptr) {
        if (!R.isZero())
          SimpVect.push_back(&R);
      } else {
        // Don't push the constant addend at this time. It will be the last
        // element of <SimpVect>.
        ConstAdd = &R;
      }
    }
  }

  assert((NextTmpIdx <= array_lengthof(TmpResult) + 1) &&
         "out-of-bound access");

  if (ConstAdd)
    SimpVect.push_back(ConstAdd);

  Value *Result;
  if (!SimpVect.empty())
    Result = createNaryFAdd(SimpVect, InstrQuota);
  else {
    // The addition is folded to 0.0.
    Result = ConstantFP::get(Instr->getType(), 0.0);
  }

  return Result;
}

Value *FAddCombine::createNaryFAdd
  (const AddendVect &Opnds, unsigned InstrQuota) {
  assert(!Opnds.empty() && "Expect at least one addend");

  // Step 1: Check if the # of instructions needed exceeds the quota.
  //
  unsigned InstrNeeded = calcInstrNumber(Opnds);
  if (InstrNeeded > InstrQuota)
    return nullptr;

  initCreateInstNum();

  // Step 2: Emit the N-ary addition.
  // Note that at most three instructions are involved in Fadd-InstCombine: the
  // addition in question, and at most two neighboring instructions.
  // The resulting optimized addition should have at least one less instruction
  // than the original addition expression tree. This implies that the
  // resulting N-ary addition has at most two instructions, and we don't need
  // to worry about tree-height when constructing the N-ary addition.
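  //
  // For illustration, the addends <1, x> and <-1, y> are emitted as the single
  // instruction "fsub x, y": x becomes LastVal, and y's NeedNeg flag turns the
  // pairing into an fsub rather than an fadd.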

  Value *LastVal = nullptr;
  bool LastValNeedNeg = false;

  // Iterate the addends, creating fadd/fsub using adjacent two addends.
  for (const FAddend *Opnd : Opnds) {
    bool NeedNeg;
    Value *V = createAddendVal(*Opnd, NeedNeg);
    if (!LastVal) {
      LastVal = V;
      LastValNeedNeg = NeedNeg;
      continue;
    }

    if (LastValNeedNeg == NeedNeg) {
      LastVal = createFAdd(LastVal, V);
      continue;
    }

    if (LastValNeedNeg)
      LastVal = createFSub(V, LastVal);
    else
      LastVal = createFSub(LastVal, V);

    LastValNeedNeg = false;
  }

  if (LastValNeedNeg) {
    LastVal = createFNeg(LastVal);
  }

#ifndef NDEBUG
  assert(CreateInstrNum == InstrNeeded &&
         "Inconsistent instruction numbers");
#endif

  return LastVal;
}

Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder->CreateFSub(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFNeg(Value *V) {
  Value *Zero = cast<Value>(ConstantFP::getZeroValueForNegation(V->getType()));
  Value *NewV = createFSub(Zero, V);
  if (Instruction *I = dyn_cast<Instruction>(NewV))
    createInstPostProc(I, true); // fneg's don't receive instruction numbers.
  return NewV;
}

Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder->CreateFAdd(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder->CreateFMul(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFDiv(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder->CreateFDiv(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
  NewInstr->setDebugLoc(Instr->getDebugLoc());

  // Keep track of the number of instructions created.
  if (!NoNumber)
    incCreateInstNum();

  // Propagate fast-math flags.
  NewInstr->setFastMathFlags(Instr->getFastMathFlags());
}

// Return the number of instructions needed to emit the N-ary addition.
// NOTE: Keep this function in sync with createAddendVal().
unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
  unsigned OpndNum = Opnds.size();
  unsigned InstrNeeded = OpndNum - 1;

  // The number of addends in the form of "(-1)*x".
  unsigned NegOpndNum = 0;

  // Adjust the number of instructions needed to emit the N-ary add.
  for (const FAddend *Opnd : Opnds) {
    if (Opnd->isConstant())
      continue;

    // The constant check above is really for a few special constant
    // coefficients.
    if (isa<UndefValue>(Opnd->getSymVal()))
      continue;

    const FAddendCoef &CE = Opnd->getCoef();
    if (CE.isMinusOne() || CE.isMinusTwo())
      NegOpndNum++;

    // Let the addend be "c * x". If "c == +/-1", the value of the addend
    // is immediately available; otherwise, it needs exactly one instruction
    // to evaluate the value.
    if (!CE.isMinusOne() && !CE.isOne())
      InstrNeeded++;
  }
  if (NegOpndNum == OpndNum)
    InstrNeeded++;
  return InstrNeeded;
}

// Input Addend        Value           NeedNeg(output)
// ================================================================
//  Constant C         C               false
//  <+/-1, V>          V               coefficient is -1
//  <2/-2, V>          "fadd V, V"     coefficient is -2
//  <C, V>             "fmul V, C"     false
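//
// For illustration, the addend <-2, V> is emitted as "fadd V, V" with NeedNeg
// set to true; the caller then folds the pending negation into an fsub or a
// final fneg.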
//
// NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
  const FAddendCoef &Coeff = Opnd.getCoef();

  if (Opnd.isConstant()) {
    NeedNeg = false;
    return Coeff.getValue(Instr->getType());
  }

  Value *OpndVal = Opnd.getSymVal();

  if (Coeff.isMinusOne() || Coeff.isOne()) {
    NeedNeg = Coeff.isMinusOne();
    return OpndVal;
  }

  if (Coeff.isTwo() || Coeff.isMinusTwo()) {
    NeedNeg = Coeff.isMinusTwo();
    return createFAdd(OpndVal, OpndVal);
  }

  NeedNeg = false;
  return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}

/// \brief Return true if we can prove that adding the two values of the
/// knownbits will not overflow.
/// Otherwise return false.
static bool checkRippleForAdd(const KnownBits &LHSKnown,
                              const KnownBits &RHSKnown) {
  // Addition of two 2's complement numbers having opposite signs will never
  // overflow.
  if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
      (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
    return true;

  // If either of the values is known to be non-negative, adding them can only
  // overflow if the second is also non-negative, so we can assume that.
  // Two non-negative numbers will only overflow if there is a carry to the
  // sign bit, so we can check if even when the values are as big as possible
  // there is no overflow to the sign bit.
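  //
  // For illustration with 4-bit values: if each operand has bits 3 and 2
  // known zero, MaxLHS = MaxRHS = 0b0011, and 0b0011 + 0b0011 = 0b0110 leaves
  // the sign bit clear, so the addition cannot overflow.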
  if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
    APInt MaxLHS = ~LHSKnown.Zero;
    MaxLHS.clearSignBit();
    APInt MaxRHS = ~RHSKnown.Zero;
    MaxRHS.clearSignBit();
    APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
    return Result.isSignBitClear();
  }

  // If either of the values is known to be negative, adding them can only
  // overflow if the second is also negative, so we can assume that.
  // Two negative numbers will only overflow if there is no carry to the sign
  // bit, so we can check if even when the values are as small as possible
  // there is overflow to the sign bit.
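  //
  // For illustration with 4-bit values: if both operands have bits 3 and 2
  // known one, each value is at least 0b1100 = -4; MinLHS = MinRHS = 0b0100
  // after clearing the sign bit, and 0b0100 + 0b0100 = 0b1000 sets the sign
  // bit, so the true sum (at least -8) still fits.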
  if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
    APInt MinLHS = LHSKnown.One;
    MinLHS.clearSignBit();
    APInt MinRHS = RHSKnown.One;
    MinRHS.clearSignBit();
    APInt Result = std::move(MinLHS) + std::move(MinRHS);
    return Result.isSignBitSet();
  }

  // If we reached here it means that we know nothing about the sign bits.
  // In this case we can't know if there will be an overflow, since by
  // changing the sign bits any two values can be made to overflow.
  return false;
}

/// Return true if we can prove that:
///    (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS))
/// This basically requires proving that the add in the original type would not
/// overflow to change the sign bit or have a carry out.
bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS,
                                            Instruction &CxtI) {
  // There are different heuristics we can use for this. Here are some simple
  // ones.

  // If LHS and RHS each have at least two sign bits, the addition will look
  // like
  //
  //   XX..X
  // + YY..Y
  //
  // If the carry into the most significant position is 0, X and Y can't both
  // be 1 and therefore the carry out of the addition is also 0.
  //
  // If the carry into the most significant position is 1, X and Y can't both
  // be 0 and therefore the carry out of the addition is also 1.
  //
  // Since the carry into the most significant position is always equal to
  // the carry out of the addition, there is no signed overflow.
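  //
  // For illustration: i8 values with at least two sign bits each lie in
  // [-64, 63], so their sum lies in [-128, 126] and cannot overflow i8.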
  if (ComputeNumSignBits(LHS, 0, &CxtI) > 1 &&
      ComputeNumSignBits(RHS, 0, &CxtI) > 1)
    return true;

  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(LHS, LHSKnown, 0, &CxtI);

  KnownBits RHSKnown(BitWidth);
  computeKnownBits(RHS, RHSKnown, 0, &CxtI);

  // Check if the carry bit of the addition will not cause overflow.
  if (checkRippleForAdd(LHSKnown, RHSKnown))
    return true;

  return false;
}

/// \brief Return true if we can prove that:
///    (sub LHS, RHS) === (sub nsw LHS, RHS)
/// This basically requires proving that the sub in the original type would not
/// overflow to change the sign bit or have a carry out.
/// TODO: Handle this for Vectors.
bool InstCombiner::WillNotOverflowSignedSub(Value *LHS, Value *RHS,
                                            Instruction &CxtI) {
  // If LHS and RHS each have at least two sign bits, the subtraction
  // cannot overflow.
  if (ComputeNumSignBits(LHS, 0, &CxtI) > 1 &&
      ComputeNumSignBits(RHS, 0, &CxtI) > 1)
    return true;

  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(LHS, LHSKnown, 0, &CxtI);

  KnownBits RHSKnown(BitWidth);
  computeKnownBits(RHS, RHSKnown, 0, &CxtI);

  // Subtraction of two 2's complement numbers having identical signs will
  // never overflow.
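  //
  // For illustration with i8: if both values are negative, each lies in
  // [-128, -1], so LHS - RHS lies in [-127, 127] and fits; the analogous
  // argument applies when both are non-negative.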
  if ((LHSKnown.One[BitWidth - 1] && RHSKnown.One[BitWidth - 1]) ||
      (LHSKnown.Zero[BitWidth - 1] && RHSKnown.Zero[BitWidth - 1]))
    return true;

  // TODO: implement logic similar to checkRippleForAdd
  return false;
}

/// \brief Return true if we can prove that:
///    (sub LHS, RHS) === (sub nuw LHS, RHS)
bool InstCombiner::WillNotOverflowUnsignedSub(Value *LHS, Value *RHS,
                                              Instruction &CxtI) {
  // If the LHS is negative and the RHS is non-negative, no unsigned wrap.
  bool LHSKnownNonNegative, LHSKnownNegative;
  bool RHSKnownNonNegative, RHSKnownNegative;
  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, /*Depth=*/0,
                 &CxtI);
  ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, /*Depth=*/0,
                 &CxtI);
  if (LHSKnownNegative && RHSKnownNonNegative)
    return true;

  return false;
}

// Checks if any operand is negative and we can convert add to sub.
// This function checks for the following negative patterns
//   ADD(XOR(OR(Z, NOT(C)), C)), 1) == NEG(AND(Z, C))
//   ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
//   XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
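//
// For illustration of the first pattern: bit-by-bit, XOR(OR(Z, NOT(C)), C)
// equals NOT(AND(Z, C)), and NOT(x) + 1 == -x in two's complement, so the
// whole expression computes -AND(Z, C) and the surrounding add becomes a
// subtract.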
static Value *checkForNegativeOperand(BinaryOperator &I,
                                      InstCombiner::BuilderTy *Builder) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  // This function creates 2 instructions to replace ADD; we need at least one
  // of LHS or RHS to have one use to ensure benefit in the transform.
  if (!LHS->hasOneUse() && !RHS->hasOneUse())
    return nullptr;

  Value *X = nullptr, *Y = nullptr, *Z = nullptr;
  const APInt *C1 = nullptr, *C2 = nullptr;

  // If ONE is on the other side, swap.
  if (match(RHS, m_Add(m_Value(X), m_One())))
    std::swap(LHS, RHS);

  if (match(LHS, m_Add(m_Value(X), m_One()))) {
    // If XOR is on the other side, swap.
    if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
      std::swap(X, RHS);

    if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
      // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
      // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
      if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
        Value *NewAnd = Builder->CreateAnd(Z, *C1);
        return Builder->CreateSub(RHS, NewAnd, "sub");
      } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
        // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
        // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
        Value *NewOr = Builder->CreateOr(Z, ~(*C1));
        return Builder->CreateSub(RHS, NewOr, "sub");
      }
    }
  }

  // Restore LHS and RHS.
  LHS = I.getOperand(0);
  RHS = I.getOperand(1);

  // If XOR is on the other side, swap.
  if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
    std::swap(LHS, RHS);

  // C1 is odd here, so C2 = C1 - 1 is even.
  // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
  // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
  if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
    if (C1->countTrailingZeros() == 0)
      if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
        Value *NewOr = Builder->CreateOr(Z, ~(*C2));
        return Builder->CreateSub(RHS, NewOr, "sub");
      }
  return nullptr;
}

Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
                                 I.hasNoUnsignedWrap(), SQ))
    return replaceInstUsesWith(I, V);

  // (A*B)+(A*C) -> A*(B+C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  const APInt *RHSC;
  if (match(RHS, m_APInt(RHSC))) {
    if (RHSC->isSignMask()) {
      // If wrapping is not allowed, then the addition must set the sign bit:
      // X + (signmask) --> X | signmask
      if (I.hasNoSignedWrap() || I.hasNoUnsignedWrap())
        return BinaryOperator::CreateOr(LHS, RHS);

      // If wrapping is allowed, then the addition flips the sign bit of LHS:
      // X + (signmask) --> X ^ signmask
      return BinaryOperator::CreateXor(LHS, RHS);
    }

    // Is this add the last step in a convoluted sext?
    Value *X;
    const APInt *C;
    if (match(LHS, m_ZExt(m_Xor(m_Value(X), m_APInt(C)))) &&
        C->isMinSignedValue() &&
        C->sext(LHS->getType()->getScalarSizeInBits()) == *RHSC) {
      // add(zext(xor i16 X, -32768), -32768) --> sext X
      return CastInst::Create(Instruction::SExt, X, LHS->getType());
    }

    if (RHSC->isNegative() &&
        match(LHS, m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C)))) &&
        RHSC->sge(-C->sext(RHSC->getBitWidth()))) {
      // (add (zext (add nuw X, C)), Val) -> (zext (add nuw X, C+Val))
      Constant *NewC =
          ConstantInt::get(X->getType(), *C + RHSC->trunc(C->getBitWidth()));
      return new ZExtInst(Builder->CreateNUWAdd(X, NewC), I.getType());
    }
  }

  // FIXME: Use the match above instead of dyn_cast to allow these transforms
  // for splat vectors.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
    // zext(bool) + C -> bool ? C + 1 : C
    if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
      if (ZI->getSrcTy()->isIntegerTy(1))
        return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);

    Value *XorLHS = nullptr; ConstantInt *XorRHS = nullptr;
    if (match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
      uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
      const APInt &RHSVal = CI->getValue();
      unsigned ExtendAmt = 0;
      // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
      // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
      if (XorRHS->getValue() == -RHSVal) {
        if (RHSVal.isPowerOf2())
          ExtendAmt = TySizeBits - RHSVal.logBase2() - 1;
        else if (XorRHS->getValue().isPowerOf2())
          ExtendAmt = TySizeBits - XorRHS->getValue().logBase2() - 1;
      }

      if (ExtendAmt) {
        APInt Mask = APInt::getHighBitsSet(TySizeBits, ExtendAmt);
        if (!MaskedValueIsZero(XorLHS, Mask, 0, &I))
          ExtendAmt = 0;
      }

      if (ExtendAmt) {
        Constant *ShAmt = ConstantInt::get(I.getType(), ExtendAmt);
        Value *NewShl = Builder->CreateShl(XorLHS, ShAmt, "sext");
        return BinaryOperator::CreateAShr(NewShl, ShAmt);
      }

      // If this is a xor that was canonicalized from a sub, turn it back into
      // a sub and fuse this add with it.
      if (LHS->hasOneUse() && (XorRHS->getValue()+1).isPowerOf2()) {
        IntegerType *IT = cast<IntegerType>(I.getType());
        KnownBits LHSKnown(IT->getBitWidth());
        computeKnownBits(XorLHS, LHSKnown, 0, &I);
        if ((XorRHS->getValue() | LHSKnown.Zero).isAllOnesValue())
          return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
                                           XorLHS);
      }
      // (X + signmask) + C could have gotten canonicalized to (X^signmask) + C,
      // transform them into (X + (signmask ^ C))
      if (XorRHS->getValue().isSignMask())
        return BinaryOperator::CreateAdd(XorLHS,
                                         ConstantExpr::getXor(XorRHS, CI));
    }
  }

  if (isa<Constant>(RHS))
    if (Instruction *NV = foldOpWithConstantIntoOperand(I))
      return NV;

  if (I.getType()->getScalarType()->isIntegerTy(1))
    return BinaryOperator::CreateXor(LHS, RHS);

  // X + X --> X << 1
  if (LHS == RHS) {
    BinaryOperator *New =
      BinaryOperator::CreateShl(LHS, ConstantInt::get(I.getType(), 1));
    New->setHasNoSignedWrap(I.hasNoSignedWrap());
    New->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return New;
  }

  // -A + B  -->  B - A
  // -A + -B  -->  -(A + B)
  if (Value *LHSV = dyn_castNegVal(LHS)) {
    if (!isa<Constant>(RHS))
      if (Value *RHSV = dyn_castNegVal(RHS)) {
        Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
        return BinaryOperator::CreateNeg(NewAdd);
      }

    return BinaryOperator::CreateSub(RHS, LHSV);
  }

  // A + -B  -->  A - B
  if (!isa<Constant>(RHS))
    if (Value *V = dyn_castNegVal(RHS))
      return BinaryOperator::CreateSub(LHS, V);

  if (Value *V = checkForNegativeOperand(I, Builder))
    return replaceInstUsesWith(I, V);

  // A+B --> A|B iff A and B have no bits set in common.
  if (haveNoCommonBitsSet(LHS, RHS, DL, &AC, &I, &DT))
    return BinaryOperator::CreateOr(LHS, RHS);

  if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
    Value *X;
    if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
      return BinaryOperator::CreateSub(SubOne(CRHS), X);
  }

  // FIXME: We already did a check for ConstantInt RHS above this.
  // FIXME: Is this pattern covered by another fold? No regression tests fail on
  // removal.
  if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
    // (X & FF00) + xx00  -> (X+xx00) & FF00
    Value *X;
    ConstantInt *C2;
    if (LHS->hasOneUse() &&
        match(LHS, m_And(m_Value(X), m_ConstantInt(C2))) &&
        CRHS->getValue() == (CRHS->getValue() & C2->getValue())) {
      // See if all bits from the first bit set in the Add RHS up are included
      // in the mask. First, get the rightmost bit.
      const APInt &AddRHSV = CRHS->getValue();

      // Form a mask of all bits from the lowest bit added through the top.
      APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));

      // See if the and mask includes all of these bits.
      APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());

      if (AddRHSHighBits == AddRHSHighBitsAnd) {
        // Okay, the xform is safe. Insert the new add pronto.
        Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
        return BinaryOperator::CreateAnd(NewAdd, C2);
      }
    }
  }

  // add (select X 0 (sub n A)) A  -->  select X A n
  {
    SelectInst *SI = dyn_cast<SelectInst>(LHS);
    Value *A = RHS;
    if (!SI) {
      SI = dyn_cast<SelectInst>(RHS);
      A = LHS;
    }
    if (SI && SI->hasOneUse()) {
      Value *TV = SI->getTrueValue();
      Value *FV = SI->getFalseValue();
      Value *N;

      // Can we fold the add into the argument of the select?
      // We check both true and false select arguments for a matching subtract.
      if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the true select value.
        return SelectInst::Create(SI->getCondition(), N, A);

      if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the false select value.
        return SelectInst::Create(SI->getCondition(), A, N);
    }
  }

  // Check for (add (sext x), y), see if we can merge this into an
  // integer add followed by a sext.
  if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) {
    // (add (sext x), cst) --> (sext (add x, cst'))
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
      if (LHSConv->hasOneUse()) {
        Constant *CI =
            ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
        if (ConstantExpr::getSExt(CI, I.getType()) == RHSC &&
            WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI, I)) {
          // Insert the new, smaller add.
          Value *NewAdd =
              Builder->CreateNSWAdd(LHSConv->getOperand(0), CI, "addconv");
          return new SExtInst(NewAdd, I.getType());
        }
      }
    }

    // (add (sext x), (sext y)) --> (sext (add int x, y))
    if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of sexts), and if the
      // integer add will not overflow.
      if (LHSConv->getOperand(0)->getType() ==
              RHSConv->getOperand(0)->getType() &&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0),
                                   RHSConv->getOperand(0), I)) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
                                              RHSConv->getOperand(0), "addconv");
        return new SExtInst(NewAdd, I.getType());
      }
    }
  }

  // Check for (add (zext x), y), see if we can merge this into an
  // integer add followed by a zext.
  if (auto *LHSConv = dyn_cast<ZExtInst>(LHS)) {
    // (add (zext x), cst) --> (zext (add x, cst'))
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
      if (LHSConv->hasOneUse()) {
        Constant *CI =
            ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
        if (ConstantExpr::getZExt(CI, I.getType()) == RHSC &&
            computeOverflowForUnsignedAdd(LHSConv->getOperand(0), CI, &I) ==
                OverflowResult::NeverOverflows) {
          // Insert the new, smaller add.
          Value *NewAdd =
              Builder->CreateNUWAdd(LHSConv->getOperand(0), CI, "addconv");
          return new ZExtInst(NewAdd, I.getType());
        }
      }
    }

    // (add (zext x), (zext y)) --> (zext (add int x, y))
    if (auto *RHSConv = dyn_cast<ZExtInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of zexts), and if the
      // integer add will not overflow.
      if (LHSConv->getOperand(0)->getType() ==
              RHSConv->getOperand(0)->getType() &&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          computeOverflowForUnsignedAdd(LHSConv->getOperand(0),
                                        RHSConv->getOperand(0),
                                        &I) == OverflowResult::NeverOverflows) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNUWAdd(
            LHSConv->getOperand(0), RHSConv->getOperand(0), "addconv");
        return new ZExtInst(NewAdd, I.getType());
      }
    }
  }

  // (add (xor A, B) (and A, B)) --> (or A, B)
  {
    Value *A = nullptr, *B = nullptr;
    if (match(RHS, m_Xor(m_Value(A), m_Value(B))) &&
        match(LHS, m_c_And(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateOr(A, B);

    if (match(LHS, m_Xor(m_Value(A), m_Value(B))) &&
        match(RHS, m_c_And(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateOr(A, B);
  }

  // (add (or A, B) (and A, B)) --> (add A, B)
  {
    Value *A = nullptr, *B = nullptr;
    if (match(RHS, m_Or(m_Value(A), m_Value(B))) &&
        match(LHS, m_c_And(m_Specific(A), m_Specific(B)))) {
      auto *New = BinaryOperator::CreateAdd(A, B);
      New->setHasNoSignedWrap(I.hasNoSignedWrap());
      New->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
      return New;
    }

    if (match(LHS, m_Or(m_Value(A), m_Value(B))) &&
        match(RHS, m_c_And(m_Specific(A), m_Specific(B)))) {
      auto *New = BinaryOperator::CreateAdd(A, B);
      New->setHasNoSignedWrap(I.hasNoSignedWrap());
      New->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
      return New;
    }
  }

  // TODO(jingyue): Consider WillNotOverflowSignedAdd and
  // WillNotOverflowUnsignedAdd to reduce the number of invocations of
  // computeKnownBits.
  if (!I.hasNoSignedWrap() && WillNotOverflowSignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() &&
      computeOverflowForUnsignedAdd(LHS, RHS, &I) ==
          OverflowResult::NeverOverflows) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), SQ))
    return replaceInstUsesWith(I, V);

  if (isa<Constant>(RHS))
    if (Instruction *FoldedFAdd = foldOpWithConstantIntoOperand(I))
      return FoldedFAdd;

  // -A + B  -->  B - A
  // -A + -B  -->  -(A + B)
  if (Value *LHSV = dyn_castFNegVal(LHS)) {
    Instruction *RI = BinaryOperator::CreateFSub(RHS, LHSV);
    RI->copyFastMathFlags(&I);
    return RI;
  }

  // A + -B  -->  A - B
  if (!isa<Constant>(RHS))
    if (Value *V = dyn_castFNegVal(RHS)) {
      Instruction *RI = BinaryOperator::CreateFSub(LHS, V);
      RI->copyFastMathFlags(&I);
      return RI;
    }

  // Check for (fadd double (sitofp x), y), see if we can merge this into an
  // integer add followed by a promotion.
  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
    Value *LHSIntVal = LHSConv->getOperand(0);
    Type *FPType = LHSConv->getType();

    // TODO: This check is overly conservative. In many cases known bits
    // analysis can tell us that the result of the addition has less significant
    // bits than the integer type can hold.
    auto IsValidPromotion = [](Type *FTy, Type *ITy) {
      Type *FScalarTy = FTy->getScalarType();
      Type *IScalarTy = ITy->getScalarType();

      // Do we have enough bits in the significand to represent the result of
      // the integer addition?
      unsigned MaxRepresentableBits =
          APFloat::semanticsPrecision(FScalarTy->getFltSemantics());
      return IScalarTy->getIntegerBitWidth() <= MaxRepresentableBits;
    };

    // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
    // ... if the constant fits in the integer value. This is useful for things
    // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
    // requires a constant pool load, and generally allows the add to be better
    // codegen'ed, so always do it if possible.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
        Constant *CI =
            ConstantExpr::getFPToSI(CFP, LHSIntVal->getType());
        if (LHSConv->hasOneUse() &&
            ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
            WillNotOverflowSignedAdd(LHSIntVal, CI, I)) {
          // Insert the new integer add.
          Value *NewAdd = Builder->CreateNSWAdd(LHSIntVal,
                                                CI, "addconv");
          return new SIToFPInst(NewAdd, I.getType());
        }
      }

    // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
    if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
      Value *RHSIntVal = RHSConv->getOperand(0);
      // It's enough to check LHS types only because we require int types to
      // be the same for this transform.
      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
        // Only do this if x/y have the same type, if at least one of them has a
        // single use (so we don't increase the number of int->fp conversions),
        // and if the integer add will not overflow.
        if (LHSIntVal->getType() == RHSIntVal->getType() &&
            (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
            WillNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
          // Insert the new integer add.
          Value *NewAdd = Builder->CreateNSWAdd(LHSIntVal,
                                                RHSIntVal, "addconv");
          return new SIToFPInst(NewAdd, I.getType());
        }
      }
    }
  }

  // select C, 0, B + select C, A, 0 -> select C, A, B
  {
    Value *A1, *B1, *C1, *A2, *B2, *C2;
    if (match(LHS, m_Select(m_Value(C1), m_Value(A1), m_Value(B1))) &&
        match(RHS, m_Select(m_Value(C2), m_Value(A2), m_Value(B2)))) {
      if (C1 == C2) {
        Constant *Z1 = nullptr, *Z2 = nullptr;
        Value *A, *B, *C = C1;
        if (match(A1, m_AnyZero()) && match(B2, m_AnyZero())) {
          Z1 = dyn_cast<Constant>(A1); A = A2;
          Z2 = dyn_cast<Constant>(B2); B = B1;
        } else if (match(B1, m_AnyZero()) && match(A2, m_AnyZero())) {
          Z1 = dyn_cast<Constant>(B1); B = B2;
          Z2 = dyn_cast<Constant>(A2); A = A1;
        }

        if (Z1 && Z2 &&
            (I.hasNoSignedZeros() ||
             (Z1->isNegativeZeroValue() && Z2->isNegativeZeroValue()))) {
          return SelectInst::Create(C, A, B);
        }
      }
    }
  }

  if (I.hasUnsafeAlgebra()) {
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return Changed ? &I : nullptr;
}

/// Optimize pointer differences into the same array into a size. Consider:
///  &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
///
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
                                               Type *Ty) {
  // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
  // this.
  bool Swapped = false;
  GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;

  // For now we require one side to be the base pointer "A" or a constant
  // GEP derived from it.
  if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
    // (gep X, ...) - X
    if (LHSGEP->getOperand(0) == RHS) {
      GEP1 = LHSGEP;
      Swapped = false;
    } else if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
      // (gep X, ...) - (gep X, ...)
      if (LHSGEP->getOperand(0)->stripPointerCasts() ==
          RHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = RHSGEP;
        GEP1 = LHSGEP;
        Swapped = false;
      }
    }
  }

  if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
    // X - (gep X, ...)
    if (RHSGEP->getOperand(0) == LHS) {
      GEP1 = RHSGEP;
      Swapped = true;
    } else if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
      // (gep X, ...) - (gep X, ...)
      if (RHSGEP->getOperand(0)->stripPointerCasts() ==
          LHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = LHSGEP;
        GEP1 = RHSGEP;
        Swapped = true;
      }
    }
  }

  // Avoid duplicating the arithmetic if GEP2 has non-constant indices and
  // multiple users.
  if (!GEP1 ||
      (GEP2 && !GEP2->hasAllConstantIndices() && !GEP2->hasOneUse()))
    return nullptr;

  // Emit the offset of the GEP as an intptr_t.
  Value *Result = EmitGEPOffset(GEP1);

  // If we had a constant expression GEP on the other side offsetting the
  // pointer, subtract it from the offset we have.
  if (GEP2) {
    Value *Offset = EmitGEPOffset(GEP2);
    Result = Builder->CreateSub(Result, Offset);
  }

  // If we have p - gep(p, ...) then we have to negate the result.
  if (Swapped)
    Result = Builder->CreateNeg(Result, "diff.neg");

  return Builder->CreateIntCast(Result, Ty, true);
}

Instruction *InstCombiner::visitSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),
                                 I.hasNoUnsignedWrap(), SQ))
    return replaceInstUsesWith(I, V);

  // (A*B)-(A*C) -> A*(B-C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // If this is a 'B = x-(-A)', change to B = x+A.
  if (Value *V = dyn_castNegVal(Op1)) {
    BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);

    if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
      assert(BO->getOpcode() == Instruction::Sub &&
             "Expected a subtraction operator!");
      if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    } else {
      if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    }

    return Res;
  }

  if (I.getType()->getScalarType()->isIntegerTy(1))
    return BinaryOperator::CreateXor(Op0, Op1);

  // Replace (-1 - A) with (~A).
  if (match(Op0, m_AllOnes()))
    return BinaryOperator::CreateNot(Op1);

  if (Constant *C = dyn_cast<Constant>(Op0)) {
    // C - ~X == X + (1+C)
    Value *X;
    if (match(Op1, m_Not(m_Value(X))))
      return BinaryOperator::CreateAdd(X, AddOne(C));

    // Try to fold constant sub into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

    // Try to fold constant sub into PHI values.
    if (PHINode *PN = dyn_cast<PHINode>(Op1))
      if (Instruction *R = foldOpIntoPhi(I, PN))
        return R;

    // C-(X+C2) --> (C-C2)-X
    Constant *C2;
    if (match(Op1, m_Add(m_Value(X), m_Constant(C2))))
      return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);

    // Fold (sub 0, (zext bool to B)) --> (sext bool to B)
    if (C->isNullValue() && match(Op1, m_ZExt(m_Value(X))))
      if (X->getType()->getScalarType()->isIntegerTy(1))
        return CastInst::CreateSExtOrBitCast(X, Op1->getType());

    // Fold (sub 0, (sext bool to B)) --> (zext bool to B)
    if (C->isNullValue() && match(Op1, m_SExt(m_Value(X))))
      if (X->getType()->getScalarType()->isIntegerTy(1))
        return CastInst::CreateZExtOrBitCast(X, Op1->getType());
  }

  const APInt *Op0C;
  if (match(Op0, m_APInt(Op0C))) {
    unsigned BitWidth = I.getType()->getScalarSizeInBits();

    // -(X >>u 31) -> (X >>s 31)
    // -(X >>s 31) -> (X >>u 31)
    if (Op0C->isNullValue()) {
      Value *X;
      const APInt *ShAmt;
      if (match(Op1, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
          *ShAmt == BitWidth - 1) {
        Value *ShAmtOp = cast<Instruction>(Op1)->getOperand(1);
        return BinaryOperator::CreateAShr(X, ShAmtOp);
      }
      if (match(Op1, m_AShr(m_Value(X), m_APInt(ShAmt))) &&
          *ShAmt == BitWidth - 1) {
        Value *ShAmtOp = cast<Instruction>(Op1)->getOperand(1);
        return BinaryOperator::CreateLShr(X, ShAmtOp);
      }
    }

    // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
    // zero.
    if (Op0C->isMask()) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(Op1, RHSKnown, 0, &I);
      if ((*Op0C | RHSKnown.Zero).isAllOnesValue())
        return BinaryOperator::CreateXor(Op1, Op0);
    }
  }

  {
    Value *Y;
    // X-(X+Y) == -Y    X-(Y+X) == -Y
    if (match(Op1, m_c_Add(m_Specific(Op0), m_Value(Y))))
      return BinaryOperator::CreateNeg(Y);

    // (X-Y)-X == -Y
    if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
      return BinaryOperator::CreateNeg(Y);
  }

  // (sub (or A, B) (xor A, B)) --> (and A, B)
  {
    Value *A, *B;
    if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);
  }

  {
    Value *Y;
    // ((X | Y) - X) --> (~X & Y)
    if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
      return BinaryOperator::CreateAnd(
          Y, Builder->CreateNot(Op1, Op1->getName() + ".not"));
  }

  if (Op1->hasOneUse()) {
    Value *X = nullptr, *Y = nullptr, *Z = nullptr;
    Constant *C = nullptr;

    // (X - (Y - Z))  -->  (X + (Z - Y)).
    if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
      return BinaryOperator::CreateAdd(Op0,
                                      Builder->CreateSub(Z, Y, Op1->getName()));

    // (X - (X & Y))   -->   (X & ~Y)
    //
    if (match(Op1, m_c_And(m_Value(Y), m_Specific(Op0))))
      return BinaryOperator::CreateAnd(Op0,
                                  Builder->CreateNot(Y, Y->getName() + ".not"));

    // 0 - (X sdiv C)  -> (X sdiv -C)  provided the negation doesn't overflow.
    if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) && match(Op0, m_Zero()) &&
        C->isNotMinSignedValue() && !C->isOneValue())
      return BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C));

    // 0 - (X << Y)  -> (-X << Y)   when X is freely negatable.
    if (match(Op1, m_Shl(m_Value(X), m_Value(Y))) && match(Op0, m_Zero()))
      if (Value *XNeg = dyn_castNegVal(X))
        return BinaryOperator::CreateShl(XNeg, Y);

    // Subtracting -1/0 is the same as adding 1/0:
    // sub [nsw] Op0, sext(bool Y) -> add [nsw] Op0, zext(bool Y)
    // 'nuw' is dropped in favor of the canonical form.
    if (match(Op1, m_SExt(m_Value(Y))) &&
        Y->getType()->getScalarSizeInBits() == 1) {
      Value *Zext = Builder->CreateZExt(Y, I.getType());
      BinaryOperator *Add = BinaryOperator::CreateAdd(Op0, Zext);
      Add->setHasNoSignedWrap(I.hasNoSignedWrap());
      return Add;
    }

    // X - A*-B -> X + A*B
    // X - -A*B -> X + A*B
    Value *A, *B;
    Constant *CI;
    if (match(Op1, m_c_Mul(m_Value(A), m_Neg(m_Value(B)))))
      return BinaryOperator::CreateAdd(Op0, Builder->CreateMul(A, B));

    // X - A*CI -> X + A*-CI
    // No need to handle commuted multiply because multiply handling will
    // ensure the constant will be moved to the right hand side.
    if (match(Op1, m_Mul(m_Value(A), m_Constant(CI)))) {
      Value *NewMul = Builder->CreateMul(A, ConstantExpr::getNeg(CI));
      return BinaryOperator::CreateAdd(Op0, NewMul);
    }
  }

  // Optimize pointer differences into the same array into a size. Consider:
  //  &A[10] - &A[0]: we should compile this to "10".
  Value *LHSOp, *RHSOp;
  if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
      match(Op1, m_PtrToInt(m_Value(RHSOp))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
      return replaceInstUsesWith(I, Res);

  // trunc(p)-trunc(q) -> trunc(p-q)
  if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
      match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
      return replaceInstUsesWith(I, Res);

  bool Changed = false;
  if (!I.hasNoSignedWrap() && WillNotOverflowSignedSub(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() && WillNotOverflowUnsignedSub(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), SQ))
    return replaceInstUsesWith(I, V);

  // fsub nsz 0, X ==> fsub nsz -0.0, X
  if (I.getFastMathFlags().noSignedZeros() && match(Op0, m_Zero())) {
    // Subtraction from -0.0 is the canonical form of fneg.
    Instruction *NewI = BinaryOperator::CreateFNeg(Op1);
    NewI->copyFastMathFlags(&I);
    return NewI;
  }

  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *NV = FoldOpIntoSelect(I, SI))
        return NV;

  // If this is a 'B = x-(-A)', change to B = x+A, potentially looking
  // through FP extensions/truncations along the way.
  if (Value *V = dyn_castFNegVal(Op1)) {
    Instruction *NewI = BinaryOperator::CreateFAdd(Op0, V);
    NewI->copyFastMathFlags(&I);
    return NewI;
  }
  if (FPTruncInst *FPTI = dyn_cast<FPTruncInst>(Op1)) {
    if (Value *V = dyn_castFNegVal(FPTI->getOperand(0))) {
      Value *NewTrunc = Builder->CreateFPTrunc(V, I.getType());
      Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewTrunc);
      NewI->copyFastMathFlags(&I);
      return NewI;
    }
  } else if (FPExtInst *FPEI = dyn_cast<FPExtInst>(Op1)) {
    if (Value *V = dyn_castFNegVal(FPEI->getOperand(0))) {
      Value *NewExt = Builder->CreateFPExt(V, I.getType());
      Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewExt);
      NewI->copyFastMathFlags(&I);
      return NewI;
    }
  }

  if (I.hasUnsafeAlgebra()) {
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);