//===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for add, fadd, sub, and fsub.
//
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

namespace {

  /// Class representing the coefficient of a floating-point addend.
  /// This class needs to be highly efficient, which is especially true of
  /// the constructor. As of this writing, the cost of the default
  /// constructor is merely a 4-byte zero store (assuming the compiler is
  /// able to perform write-merging).
  class FAddendCoef {
  public:
    // The constructor has to initialize an APFloat, which is unnecessary for
    // most addends, whose coefficient is either 1 or -1. So the constructor
    // is expensive. In order to avoid the cost of the constructor, we should
    // reuse some instances whenever possible. The pre-created instances
    // FAddCombine::Add[0-5] embody this idea.
    FAddendCoef() = default;
    ~FAddendCoef();

    // If possible, don't define operator+/operator- etc., because these
    // operators inevitably call FAddendCoef's constructor, which is not cheap.
    void operator=(const FAddendCoef &A);
    void operator+=(const FAddendCoef &A);
    void operator*=(const FAddendCoef &S);

    void set(short C) {
      assert(!insaneIntVal(C) && "Insane coefficient");
      IsFp = false; IntVal = C;
    }

    void set(const APFloat& C);

    void negate();

    bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
    Value *getValue(Type *) const;

    bool isOne() const { return isInt() && IntVal == 1; }
    bool isTwo() const { return isInt() && IntVal == 2; }
    bool isMinusOne() const { return isInt() && IntVal == -1; }
    bool isMinusTwo() const { return isInt() && IntVal == -2; }

  private:
    bool insaneIntVal(int V) { return V > 4 || V < -4; }

    APFloat *getFpValPtr()
      { return reinterpret_cast<APFloat *>(&FpValBuf.buffer[0]); }

    const APFloat *getFpValPtr() const
      { return reinterpret_cast<const APFloat *>(&FpValBuf.buffer[0]); }

    const APFloat &getFpVal() const {
      assert(IsFp && BufHasFpVal && "Incorrect state");
      return *getFpValPtr();
    }

    APFloat &getFpVal() {
      assert(IsFp && BufHasFpVal && "Incorrect state");
      return *getFpValPtr();
    }

    bool isInt() const { return !IsFp; }

    // If the coefficient is represented by an integer, promote it to a
    // floating point.
    void convertToFpType(const fltSemantics &Sem);

    // Construct an APFloat from a signed integer.
    // TODO: We should get rid of this function when APFloat can be constructed
    //       from a *SIGNED* integer.
    APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);

    bool IsFp = false;

    // True iff FpValBuf contains an instance of APFloat.
    bool BufHasFpVal = false;

    // The integer coefficient of an individual addend is either 1 or -1,
    // and we try to simplify at most 4 addends from at most two neighboring
    // instructions. So the range of <IntVal> falls in [-4, 4]. APInt is
    // overkill for this purpose.
    short IntVal = 0;

    AlignedCharArrayUnion<APFloat> FpValBuf;
  };

  /// FAddend is used to represent a floating-point addend. An addend is
  /// represented as <C, V>, where V is a symbolic value, and C is a
  /// constant coefficient. A constant addend is represented as <C, 0>.
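  /// For example (illustrative names), the value defined by
  /// "%t = fmul fast float %x, 4.0" can be viewed as the single addend
  /// <4.0, %x>, while the bare constant 3.0 is the addend <3.0, 0>.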
  class FAddend {
  public:
    FAddend() = default;

    void operator+=(const FAddend &T) {
      assert((Val == T.Val) && "Symbolic-values disagree");
      Coeff += T.Coeff;
    }

    Value *getSymVal() const { return Val; }
    const FAddendCoef &getCoef() const { return Coeff; }

    bool isConstant() const { return Val == nullptr; }
    bool isZero() const { return Coeff.isZero(); }

    void set(short Coefficient, Value *V) {
      Coeff.set(Coefficient);
      Val = V;
    }

    void set(const APFloat &Coefficient, Value *V) {
      Coeff.set(Coefficient);
      Val = V;
    }

    void set(const ConstantFP *Coefficient, Value *V) {
      Coeff.set(Coefficient->getValueAPF());
      Val = V;
    }

    void negate() { Coeff.negate(); }

    /// Drill down the U-D chain one step to find the definition of V, and
    /// try to break the definition into one or two addends.
    static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);

    /// Similar to FAddend::drillDownOneStep() except that the value being
    /// split is the addend itself.
    unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;

    void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }

  private:
    // This addend has the value of "Coeff * Val".
    Value *Val = nullptr;
    FAddendCoef Coeff;
  };

  /// FAddCombine is the class for optimizing an unsafe fadd/fsub along
  /// with its at most two neighboring instructions.
  ///
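  /// For example (illustrative names), given the fast-math IR
  ///   %s = fsub fast float %x, %y
  ///   %r = fadd fast float %s, %y
  /// the collected addends <1, %x>, <-1, %y> and <1, %y> fold to <1, %x>,
  /// so %r simplifies to %x.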
  class FAddCombine {
  public:
    FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {}

    Value *simplify(Instruction *FAdd);

  private:
    using AddendVect = SmallVector<const FAddend *, 4>;

    Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);

    Value *performFactorization(Instruction *I);

    /// Convert given addend to a Value
    Value *createAddendVal(const FAddend &A, bool& NeedNeg);

    /// Return the number of instructions needed to emit the N-ary addition.
    unsigned calcInstrNumber(const AddendVect& Vect);

    Value *createFSub(Value *Opnd0, Value *Opnd1);
    Value *createFAdd(Value *Opnd0, Value *Opnd1);
    Value *createFMul(Value *Opnd0, Value *Opnd1);
    Value *createFDiv(Value *Opnd0, Value *Opnd1);
    Value *createFNeg(Value *V);
    Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
    void createInstPostProc(Instruction *NewInst, bool NoNumber = false);

    // Debugging stuff is clustered here.
#ifndef NDEBUG
    unsigned CreateInstrNum;
    void initCreateInstNum() { CreateInstrNum = 0; }
    void incCreateInstNum() { CreateInstrNum++; }
#else
    void initCreateInstNum() {}
    void incCreateInstNum() {}
#endif

    InstCombiner::BuilderTy &Builder;
    Instruction *Instr = nullptr;
  };

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//
// Implementation of
//    {FAddendCoef, FAddend, FAddition, FAddCombine}.
//
//===----------------------------------------------------------------------===//
FAddendCoef::~FAddendCoef() {
  if (BufHasFpVal)
    getFpValPtr()->~APFloat();
}

void FAddendCoef::set(const APFloat& C) {
  APFloat *P = getFpValPtr();

  if (isInt() || !BufHasFpVal) {
    // As the buffer is a meaningless byte stream, we cannot call
    // APFloat::operator=().
    new(P) APFloat(C);
  } else
    *P = C;

  IsFp = BufHasFpVal = true;
}

void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
  if (!isInt())
    return;

  // Construct the APFloat from the absolute value, then fix the sign, as
  // the APFloat constructor takes an unsigned integer.
  APFloat *P = getFpValPtr();
  if (IntVal > 0)
    new(P) APFloat(Sem, IntVal);
  else {
    new(P) APFloat(Sem, 0 - IntVal);
    P->changeSign();
  }
  IsFp = BufHasFpVal = true;
}

APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
  if (Val >= 0)
    return APFloat(Sem, Val);

  APFloat T(Sem, 0 - Val);
  T.changeSign();

  return T;
}

void FAddendCoef::operator=(const FAddendCoef &That) {
  if (That.isInt())
    set(That.IntVal);
  else
    set(That.getFpVal());
}

void FAddendCoef::operator+=(const FAddendCoef &That) {
  enum APFloat::roundingMode RndMode = APFloat::rmNearestTiesToEven;
  if (isInt() == That.isInt()) {
    if (isInt())
      IntVal += That.IntVal;
    else
      getFpVal().add(That.getFpVal(), RndMode);
    return;
  }

  if (isInt()) {
    const APFloat &T = That.getFpVal();
    convertToFpType(T.getSemantics());
    getFpVal().add(T, RndMode);
    return;
  }

  APFloat &T = getFpVal();
  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
}

void FAddendCoef::operator*=(const FAddendCoef &That) {
  if (That.isOne())
    return;

  if (That.isMinusOne()) {
    negate();
    return;
  }

  if (isInt() && That.isInt()) {
    int Res = IntVal * (int)That.IntVal;
    assert(!insaneIntVal(Res) && "Insane int value");
    IntVal = Res;
    return;
  }

  const fltSemantics &Semantic =
    isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();

  if (isInt())
    convertToFpType(Semantic);
  APFloat &F0 = getFpVal();

  if (That.isInt())
    F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
                APFloat::rmNearestTiesToEven);
  else
    F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
}

void FAddendCoef::negate() {
  if (isInt())
    IntVal = 0 - IntVal;
  else
    getFpVal().changeSign();
}

Value *FAddendCoef::getValue(Type *Ty) const {
  return isInt() ?
    ConstantFP::get(Ty, float(IntVal)) :
    ConstantFP::get(Ty->getContext(), getFpVal());
}

// The definition of <Val>     Addends
// =========================================
//  A + B                     <1, A>, <1, B>
//  A - B                     <1, A>, <-1, B>
//  0 - B                     <-1, B>
//  C * A                     <C, A>
//  A + C                     <1, A>, <C, NULL>
//  0 +/- 0                   <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is constant
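//
// For example, "%v = fsub fast float %a, %b" is broken into the addends
// <1, %a> and <-1, %b>, while "%v = fadd fast float %a, 3.0" yields
// <1, %a> and <3.0, NULL>.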
unsigned FAddend::drillValueDownOneStep
  (Value *Val, FAddend &Addend0, FAddend &Addend1) {
  Instruction *I = nullptr;
  if (!Val || !(I = dyn_cast<Instruction>(Val)))
    return 0;

  unsigned Opcode = I->getOpcode();

  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
    ConstantFP *C0, *C1;
    Value *Opnd0 = I->getOperand(0);
    Value *Opnd1 = I->getOperand(1);
    if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
      Opnd0 = nullptr;

    if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
      Opnd1 = nullptr;

    if (Opnd0) {
      if (!C0)
        Addend0.set(1, Opnd0);
      else
        Addend0.set(C0, nullptr);
    }

    if (Opnd1) {
      FAddend &Addend = Opnd0 ? Addend1 : Addend0;
      if (!C1)
        Addend.set(1, Opnd1);
      else
        Addend.set(C1, nullptr);
      if (Opcode == Instruction::FSub)
        Addend.negate();
    }

    if (Opnd0 || Opnd1)
      return Opnd0 && Opnd1 ? 2 : 1;

    // Both operands are zero. Weird!
    Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
    return 1;
  }

  if (I->getOpcode() == Instruction::FMul) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
      Addend0.set(C, V1);
      return 1;
    }

    if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
      Addend0.set(C, V0);
      return 1;
    }
  }

  return 0;
}

// Try to break *this* addend into two addends. e.g., suppose this addend is
// <2.3, V> and V = X + Y; by calling this function we obtain the two addends
// <2.3, X> and <2.3, Y>.
unsigned FAddend::drillAddendDownOneStep
  (FAddend &Addend0, FAddend &Addend1) const {
  if (isConstant())
    return 0;

  unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
  if (!BreakNum || Coeff.isOne())
    return BreakNum;

  Addend0.Scale(Coeff);

  if (BreakNum == 2)
    Addend1.Scale(Coeff);

  return BreakNum;
}

// Try to perform the following optimization on the input instruction I. Return
// the simplified expression if it was successful; otherwise, return nullptr.
//
//   Instruction "I" is                Simplified into
// -------------------------------------------------------
//   (x * y) +/- (x * z)               x * (y +/- z)
//   (y / x) +/- (z / x)               (y +/- z) / x
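//
// e.g., the first row in (fast-math) IR form, with illustrative names:
//   %m0 = fmul fast float %x, %y
//   %m1 = fmul fast float %x, %z
//   %r  = fadd fast float %m0, %m1
// ==>
//   %s  = fadd fast float %y, %z
//   %r  = fmul fast float %x, %s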
Value *FAddCombine::performFactorization(Instruction *I) {
  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  Instruction *I0 = dyn_cast<Instruction>(I->getOperand(0));
  Instruction *I1 = dyn_cast<Instruction>(I->getOperand(1));

  if (!I0 || !I1 || I0->getOpcode() != I1->getOpcode())
    return nullptr;

  bool isMpy = false;
  if (I0->getOpcode() == Instruction::FMul)
    isMpy = true;
  else if (I0->getOpcode() != Instruction::FDiv)
    return nullptr;

  Value *Opnd0_0 = I0->getOperand(0);
  Value *Opnd0_1 = I0->getOperand(1);
  Value *Opnd1_0 = I1->getOperand(0);
  Value *Opnd1_1 = I1->getOperand(1);

  //  Input Instr I       Factor   AddSub0  AddSub1
  //  ----------------------------------------------
  // (x*y) +/- (x*z)        x        y         z
  // (y/x) +/- (z/x)        x        y         z
  Value *Factor = nullptr;
  Value *AddSub0 = nullptr, *AddSub1 = nullptr;

  if (isMpy) {
    if (Opnd0_0 == Opnd1_0 || Opnd0_0 == Opnd1_1)
      Factor = Opnd0_0;
    else if (Opnd0_1 == Opnd1_0 || Opnd0_1 == Opnd1_1)
      Factor = Opnd0_1;

    if (Factor) {
      AddSub0 = (Factor == Opnd0_0) ? Opnd0_1 : Opnd0_0;
      AddSub1 = (Factor == Opnd1_0) ? Opnd1_1 : Opnd1_0;
    }
  } else if (Opnd0_1 == Opnd1_1) {
    // For the division case, the divisors must match.
    Factor = Opnd0_1;
    AddSub0 = Opnd0_0;
    AddSub1 = Opnd1_0;
  }

  if (!Factor)
    return nullptr;

  FastMathFlags Flags;
  Flags.setFast();
  if (I0) Flags &= I->getFastMathFlags();
  if (I1) Flags &= I->getFastMathFlags();

  // Create expression "NewAddSub = AddSub0 +/- AddSub1"
  Value *NewAddSub = (I->getOpcode() == Instruction::FAdd) ?
                      createFAdd(AddSub0, AddSub1) :
                      createFSub(AddSub0, AddSub1);
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(NewAddSub)) {
    const APFloat &F = CFP->getValueAPF();
    if (!F.isNormal())
      return nullptr;
  } else if (Instruction *II = dyn_cast<Instruction>(NewAddSub))
    II->setFastMathFlags(Flags);

  if (isMpy) {
    Value *RI = createFMul(Factor, NewAddSub);
    if (Instruction *II = dyn_cast<Instruction>(RI))
      II->setFastMathFlags(Flags);
    return RI;
  }

  Value *RI = createFDiv(NewAddSub, Factor);
  if (Instruction *II = dyn_cast<Instruction>(RI))
    II->setFastMathFlags(Flags);
  return RI;
}

Value *FAddCombine::simplify(Instruction *I) {
  assert(I->isFast() && "Expected 'fast' instruction");

  // Currently we are not able to handle vector type.
  if (I->getType()->isVectorTy())
    return nullptr;

  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  // Save the instruction before calling other member-functions.
  Instr = I;

  FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;

  unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);

  // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
  unsigned Opnd0_ExpNum = 0;
  unsigned Opnd1_ExpNum = 0;

  if (!Opnd0.isConstant())
    Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);

  // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
  if (OpndNum == 2 && !Opnd1.isConstant())
    Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);

  // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
  if (Opnd0_ExpNum && Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0_0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    // Compute instruction quota. We should save at least one instruction.
    unsigned InstQuota = 0;

    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
                 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;

    if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
      return R;
  }

  if (OpndNum != 2) {
    // The input instruction is "I = 0.0 +/- V". If V could have been
    // split into two addends, say "V = X - Y", the instruction would have
    // been optimized into "I = Y - X" in the previous steps.
    const FAddendCoef &CE = Opnd0.getCoef();
    return CE.isOne() ? Opnd0.getSymVal() : nullptr;
  }

  // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
  if (Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
  if (Opnd0_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd1);
    AllOpnds.push_back(&Opnd0_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 6: Try factorization as the last resort.
  return performFactorization(I);
}

Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
  unsigned AddendNum = Addends.size();
  assert(AddendNum <= 4 && "Too many addends");

  // For saving intermediate results.
  unsigned NextTmpIdx = 0;
  FAddend TmpResult[3];

  // Points to the constant addend of the resulting simplified expression.
  // If the resulting expression has a constant addend, it is desirable for
  // that constant to reside at the top of the resulting expression tree.
  // Placing constants close to the super-expression(s) will potentially
  // reveal some optimization opportunities in the super-expression(s).
  const FAddend *ConstAdd = nullptr;

  // Simplified addends are placed in <SimpVect>.
  AddendVect SimpVect;

  // The outer loop works on one symbolic-value at a time. Suppose the input
  // addends are: <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
  // The symbolic-values will be processed in this order: x, y, z.
  for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {

    const FAddend *ThisAddend = Addends[SymIdx];
    if (!ThisAddend) {
      // This addend was processed before.
      continue;
    }

    Value *Val = ThisAddend->getSymVal();
    unsigned StartIdx = SimpVect.size();
    SimpVect.push_back(ThisAddend);

    // The inner loop collects addends sharing the same symbolic-value; these
    // addends will later be folded into a single addend. Following the above
    // example, if the symbolic value "y" is being processed, the inner loop
    // will collect the two addends "<b1, y>" and "<b2, y>", which will later
    // be folded into "<b1+b2, y>".
    for (unsigned SameSymIdx = SymIdx + 1;
         SameSymIdx < AddendNum; SameSymIdx++) {
      const FAddend *T = Addends[SameSymIdx];
      if (T && T->getSymVal() == Val) {
        // Set null such that the next iteration of the outer loop will not
        // process this addend again.
        Addends[SameSymIdx] = nullptr;
        SimpVect.push_back(T);
      }
    }

    // If multiple addends share the same symbolic value, fold them together.
    if (StartIdx + 1 != SimpVect.size()) {
      FAddend &R = TmpResult[NextTmpIdx ++];
      R = *SimpVect[StartIdx];
      for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
        R += *SimpVect[Idx];

      // Pop all addends being folded and push the resulting folded addend.
      SimpVect.resize(StartIdx);
      if (Val) {
        if (!R.isZero())
          SimpVect.push_back(&R);
      } else {
        // Don't push the constant addend at this time. It will be the last
        // element pushed.
        ConstAdd = &R;
      }
    }
  }

  assert((NextTmpIdx <= array_lengthof(TmpResult) + 1) &&
         "out-of-bound access");

  if (ConstAdd)
    SimpVect.push_back(ConstAdd);

  Value *Result;
  if (!SimpVect.empty())
    Result = createNaryFAdd(SimpVect, InstrQuota);
  else {
    // The addition is folded to 0.0.
    Result = ConstantFP::get(Instr->getType(), 0.0);
  }

  return Result;
}

Value *FAddCombine::createNaryFAdd
  (const AddendVect &Opnds, unsigned InstrQuota) {
  assert(!Opnds.empty() && "Expect at least one addend");

  // Step 1: Check if the # of instructions needed exceeds the quota.
  unsigned InstrNeeded = calcInstrNumber(Opnds);
  if (InstrNeeded > InstrQuota)
    return nullptr;

  initCreateInstNum();

  // Step 2: Emit the N-ary addition.
  // Note that at most three instructions are involved in Fadd-InstCombine: the
  // addition in question, and at most two neighboring instructions.
  // The resulting optimized addition should have at least one less instruction
  // than the original addition expression tree. This implies that the
  // resulting N-ary addition has at most two instructions, and we don't need
  // to worry about tree-height when constructing the N-ary addition.
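  //
  // For example (quota permitting), the addend set { <-1, y>, <2, z> } is
  // emitted as:
  //   %t0 = fadd z, z     ; materialize <2, z>
  //   %r  = fsub %t0, y   ; fold in the negated addend <-1, y>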

  Value *LastVal = nullptr;
  bool LastValNeedNeg = false;

  // Iterate the addends, creating fadd/fsub using adjacent two addends.
  for (const FAddend *Opnd : Opnds) {
    bool NeedNeg;
    Value *V = createAddendVal(*Opnd, NeedNeg);
    if (!LastVal) {
      LastVal = V;
      LastValNeedNeg = NeedNeg;
      continue;
    }

    if (LastValNeedNeg == NeedNeg) {
      LastVal = createFAdd(LastVal, V);
      continue;
    }

    if (LastValNeedNeg)
      LastVal = createFSub(V, LastVal);
    else
      LastVal = createFSub(LastVal, V);

    LastValNeedNeg = false;
  }

  if (LastValNeedNeg) {
    LastVal = createFNeg(LastVal);
  }

#ifndef NDEBUG
  assert(CreateInstrNum == InstrNeeded &&
         "Inconsistent instruction numbers");
#endif

  return LastVal;
}

Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFSub(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFNeg(Value *V) {
  Value *Zero = cast<Value>(ConstantFP::getZeroValueForNegation(V->getType()));
  Value *NewV = createFSub(Zero, V);
  if (Instruction *I = dyn_cast<Instruction>(NewV))
    createInstPostProc(I, true); // fneg's don't receive instruction numbers.
  return NewV;
}

Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFAdd(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFMul(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFDiv(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFDiv(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
  NewInstr->setDebugLoc(Instr->getDebugLoc());

  // Keep track of the number of instructions created.
  if (!NoNumber)
    incCreateInstNum();

  // Propagate fast-math flags.
  NewInstr->setFastMathFlags(Instr->getFastMathFlags());
}

// Return the number of instructions needed to emit the N-ary addition.
// NOTE: Keep this function in sync with createAddendVal().
unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
  unsigned OpndNum = Opnds.size();
  unsigned InstrNeeded = OpndNum - 1;

  // The number of addends in the form of "(-1)*x".
  unsigned NegOpndNum = 0;

  // Adjust the number of instructions needed to emit the N-ary add.
  for (const FAddend *Opnd : Opnds) {
    if (Opnd->isConstant())
      continue;

    // The constant check above is really for a few special constant
    // coefficients.
    if (isa<UndefValue>(Opnd->getSymVal()))
      continue;

    const FAddendCoef &CE = Opnd->getCoef();
    if (CE.isMinusOne() || CE.isMinusTwo())
      NegOpndNum++;

    // Let the addend be "c * x". If "c == +/-1", the value of the addend
    // is immediately available; otherwise, it needs exactly one instruction
    // to evaluate the value.
    if (!CE.isMinusOne() && !CE.isOne())
      InstrNeeded++;
  }

  // If every addend is negated, one extra instruction is needed to negate
  // the final result.
  if (NegOpndNum == OpndNum)
    InstrNeeded++;
  return InstrNeeded;
}

// Input Addend        Value           NeedNeg(output)
// ================================================================
// Constant C          C               false
// <+/-1, V>           V               coefficient is -1
// <2/-2, V>           "fadd V, V"     coefficient is -2
// <C, V>              "fmul V, C"     false
//
// NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
  const FAddendCoef &Coeff = Opnd.getCoef();

  if (Opnd.isConstant()) {
    NeedNeg = false;
    return Coeff.getValue(Instr->getType());
  }

  Value *OpndVal = Opnd.getSymVal();

  if (Coeff.isMinusOne() || Coeff.isOne()) {
    NeedNeg = Coeff.isMinusOne();
    return OpndVal;
  }

  if (Coeff.isTwo() || Coeff.isMinusTwo()) {
    NeedNeg = Coeff.isMinusTwo();
    return createFAdd(OpndVal, OpndVal);
  }

  NeedNeg = false;
  return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}

/// \brief Return true if we can prove that:
///    (sub LHS, RHS) === (sub nsw LHS, RHS)
/// This basically requires proving that the sub in the original type would not
/// overflow to change the sign bit or have a carry out.
/// TODO: Handle this for Vectors.
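/// For example, if LHS and RHS are i8 values known to lie in [-64, 63]
/// (i.e. each has at least two sign bits), then LHS - RHS lies in
/// [-127, 127] and cannot wrap the i8 range [-128, 127].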
bool InstCombiner::willNotOverflowSignedSub(const Value *LHS,
                                            const Value *RHS,
                                            const Instruction &CxtI) const {
  // If LHS and RHS each have at least two sign bits, the subtraction
  // cannot overflow.
  if (ComputeNumSignBits(LHS, 0, &CxtI) > 1 &&
      ComputeNumSignBits(RHS, 0, &CxtI) > 1)
    return true;

  KnownBits LHSKnown = computeKnownBits(LHS, 0, &CxtI);

  KnownBits RHSKnown = computeKnownBits(RHS, 0, &CxtI);

  // Subtraction of two 2's complement numbers having identical signs will
  // never overflow.
  if ((LHSKnown.isNegative() && RHSKnown.isNegative()) ||
      (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()))
    return true;

  // TODO: implement logic similar to checkRippleForAdd
  return false;
}

/// \brief Return true if we can prove that:
///    (sub LHS, RHS) === (sub nuw LHS, RHS)
bool InstCombiner::willNotOverflowUnsignedSub(const Value *LHS,
                                              const Value *RHS,
                                              const Instruction &CxtI) const {
  // If the LHS is negative and the RHS is non-negative, no unsigned wrap.
  KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, &CxtI);
  KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, &CxtI);
  if (LHSKnown.isNegative() && RHSKnown.isNonNegative())
    return true;

  return false;
}

// Checks if any operand is negative and we can convert add to sub.
// This function checks for the following negative patterns
//   ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C))
//   ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
//   XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
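//
// e.g., the first pattern with C == 5 (so NOT(C) == -6), names illustrative:
//   %or  = or i32 %z, -6
//   %xor = xor i32 %or, 5     ; == NOT(AND(%z, 5))
//   %add = add i32 %xor, 1    ; == NEG(AND(%z, 5))
// so "add %x, %add" can be rewritten as "sub %x, (and %z, 5)".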
static Value *checkForNegativeOperand(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  // This function creates 2 instructions to replace ADD; we need at least one
  // of LHS or RHS to have one use to ensure benefit in the transform.
  if (!LHS->hasOneUse() && !RHS->hasOneUse())
    return nullptr;

  Value *X = nullptr, *Y = nullptr, *Z = nullptr;
  const APInt *C1 = nullptr, *C2 = nullptr;

  // if ONE is on the other side, swap
  if (match(RHS, m_Add(m_Value(X), m_One())))
    std::swap(LHS, RHS);

  if (match(LHS, m_Add(m_Value(X), m_One()))) {
    // if XOR is on the other side, swap
    if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
      std::swap(X, RHS);

    if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
      // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
      // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
      if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
        Value *NewAnd = Builder.CreateAnd(Z, *C1);
        return Builder.CreateSub(RHS, NewAnd, "sub");
      } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
        // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
        // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
        Value *NewOr = Builder.CreateOr(Z, ~(*C1));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
    }
  }

  // Restore LHS and RHS
  LHS = I.getOperand(0);
  RHS = I.getOperand(1);

  // if XOR is on the other side, swap
  if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
    std::swap(LHS, RHS);

  // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
  // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
  if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
    if (C1->countTrailingZeros() == 0)
      if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
        Value *NewOr = Builder.CreateOr(Z, ~(*C2));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
  return nullptr;
}

Instruction *InstCombiner::foldAddWithConstant(BinaryOperator &Add) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Constant *Op1C;
  if (!match(Op1, m_Constant(Op1C)))
    return nullptr;

  if (Instruction *NV = foldOpWithConstantIntoOperand(Add))
    return NV;

  Value *X;
  // zext(bool) + C -> bool ? C + 1 : C
  if (match(Op0, m_ZExt(m_Value(X))) &&
      X->getType()->getScalarSizeInBits() == 1)
    return SelectInst::Create(X, AddOne(Op1C), Op1);

  // ~X + C --> (C-1) - X
  if (match(Op0, m_Not(m_Value(X))))
    return BinaryOperator::CreateSub(SubOne(Op1C), X);

  const APInt *C;
  if (!match(Op1, m_APInt(C)))
    return nullptr;

  if (C->isSignMask()) {
    // If wrapping is not allowed, then the addition must set the sign bit:
    // X + (signmask) --> X | signmask
    if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap())
      return BinaryOperator::CreateOr(Op0, Op1);

    // If wrapping is allowed, then the addition flips the sign bit of LHS:
    // X + (signmask) --> X ^ signmask
    return BinaryOperator::CreateXor(Op0, Op1);
  }

  // Is this add the last step in a convoluted sext?
  // add(zext(xor i16 X, -32768), -32768) --> sext X
  Type *Ty = Add.getType();
  const APInt *C2;
  if (match(Op0, m_ZExt(m_Xor(m_Value(X), m_APInt(C2)))) &&
      C2->isMinSignedValue() && C2->sext(Ty->getScalarSizeInBits()) == *C)
    return CastInst::Create(Instruction::SExt, X, Ty);

  // (add (zext (add nuw X, C2)), C) --> (zext (add nuw X, C2 + C))
  if (match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C2))))) &&
      C->isNegative() && C->sge(-C2->sext(C->getBitWidth()))) {
    Constant *NewC =
        ConstantInt::get(X->getType(), *C2 + C->trunc(C2->getBitWidth()));
    return new ZExtInst(Builder.CreateNUWAdd(X, NewC), Ty);
  }

  if (C->isOneValue() && Op0->hasOneUse()) {
    // add (sext i1 X), 1 --> zext (not X)
    // TODO: The smallest IR representation is (select X, 0, 1), and that would
    // not require the one-use check. But we need to remove a transform in
    // visitSelect and make sure that IR value tracking for select is equal or
    // better than for these ops.
    if (match(Op0, m_SExt(m_Value(X))) &&
        X->getType()->getScalarSizeInBits() == 1)
      return new ZExtInst(Builder.CreateNot(X), Ty);

    // Shifts and add used to flip and mask off the low bit:
    // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1
    const APInt *C3;
    if (match(Op0, m_AShr(m_Shl(m_Value(X), m_APInt(C2)), m_APInt(C3))) &&
        C2 == C3 && *C2 == Ty->getScalarSizeInBits() - 1) {
      Value *NotX = Builder.CreateNot(X);
      return BinaryOperator::CreateAnd(NotX, ConstantInt::get(Ty, 1));
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (Value *V =
          SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                          SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // (A*B)+(A*C) -> A*(B+C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldAddWithConstant(I))
    return X;

  // FIXME: This should be moved into the above helper function to allow these
  // transforms for general constant or constant splat vectors.
  Type *Ty = I.getType();
  if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
    Value *XorLHS = nullptr; ConstantInt *XorRHS = nullptr;
    if (match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
      unsigned TySizeBits = Ty->getScalarSizeInBits();
      const APInt &RHSVal = CI->getValue();
      unsigned ExtendAmt = 0;
      // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
      // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
      if (XorRHS->getValue() == -RHSVal) {
        if (RHSVal.isPowerOf2())
          ExtendAmt = TySizeBits - RHSVal.logBase2() - 1;
        else if (XorRHS->getValue().isPowerOf2())
          ExtendAmt = TySizeBits - XorRHS->getValue().logBase2() - 1;
      }

      if (ExtendAmt) {
        APInt Mask = APInt::getHighBitsSet(TySizeBits, ExtendAmt);
        if (!MaskedValueIsZero(XorLHS, Mask, 0, &I))
          ExtendAmt = 0;
      }

      if (ExtendAmt) {
        Constant *ShAmt = ConstantInt::get(Ty, ExtendAmt);
        Value *NewShl = Builder.CreateShl(XorLHS, ShAmt, "sext");
        return BinaryOperator::CreateAShr(NewShl, ShAmt);
      }

      // If this is a xor that was canonicalized from a sub, turn it back into
      // a sub and fuse this add with it.
      if (LHS->hasOneUse() && (XorRHS->getValue()+1).isPowerOf2()) {
        KnownBits LHSKnown = computeKnownBits(XorLHS, 0, &I);
        if ((XorRHS->getValue() | LHSKnown.Zero).isAllOnesValue())
          return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
                                           XorLHS);
      }
      // (X + signmask) + C could have gotten canonicalized to (X^signmask) + C,
      // transform them into (X + (signmask ^ C))
      if (XorRHS->getValue().isSignMask())
        return BinaryOperator::CreateAdd(XorLHS,
                                         ConstantExpr::getXor(XorRHS, CI));
    }
  }

  if (Ty->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(LHS, RHS);

  // X + X --> X << 1
  if (LHS == RHS) {
    auto *Shl = BinaryOperator::CreateShl(LHS, ConstantInt::get(Ty, 1));
    Shl->setHasNoSignedWrap(I.hasNoSignedWrap());
    Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return Shl;
  }

  Value *A, *B;
  if (match(LHS, m_Neg(m_Value(A)))) {
    // -A + -B --> -(A + B)
    if (match(RHS, m_Neg(m_Value(B))))
      return BinaryOperator::CreateNeg(Builder.CreateAdd(A, B));

    // -A + B --> B - A
    return BinaryOperator::CreateSub(RHS, A);
  }

  // A + -B --> A - B
  if (match(RHS, m_Neg(m_Value(B))))
    return BinaryOperator::CreateSub(LHS, B);

  if (Value *V = checkForNegativeOperand(I, Builder))
    return replaceInstUsesWith(I, V);

  // A+B --> A|B iff A and B have no bits set in common.
  if (haveNoCommonBitsSet(LHS, RHS, DL, &AC, &I, &DT))
    return BinaryOperator::CreateOr(LHS, RHS);

  // FIXME: We already did a check for ConstantInt RHS above this.
  // FIXME: Is this pattern covered by another fold? No regression tests fail on
  // it.
  if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
    // (X & FF00) + xx00  -> (X+xx00) & FF00
    Value *X;
    ConstantInt *C2;
    if (LHS->hasOneUse() &&
        match(LHS, m_And(m_Value(X), m_ConstantInt(C2))) &&
        CRHS->getValue() == (CRHS->getValue() & C2->getValue())) {
      // See if all bits from the first bit set in the Add RHS up are included
      // in the mask. First, get the rightmost bit.
      const APInt &AddRHSV = CRHS->getValue();

      // Form a mask of all bits from the lowest bit added through the top.
      APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));

      // See if the and mask includes all of these bits.
      APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());

      if (AddRHSHighBits == AddRHSHighBitsAnd) {
        // Okay, the xform is safe. Insert the new add pronto.
        Value *NewAdd = Builder.CreateAdd(X, CRHS, LHS->getName());
        return BinaryOperator::CreateAnd(NewAdd, C2);
      }
    }
  }

  // add (select X 0 (sub n A)) A  -->  select X A n
  {
    SelectInst *SI = dyn_cast<SelectInst>(LHS);
    Value *A = RHS;
    if (!SI) {
      SI = dyn_cast<SelectInst>(RHS);
      A = LHS;
    }

    if (SI && SI->hasOneUse()) {
      Value *TV = SI->getTrueValue();
      Value *FV = SI->getFalseValue();
      Value *N;

      // Can we fold the add into the argument of the select?
      // We check both true and false select arguments for a matching subtract.
      if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the true select value.
        return SelectInst::Create(SI->getCondition(), N, A);

      if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the false select value.
        return SelectInst::Create(SI->getCondition(), A, N);
    }
  }

  // Check for (add (sext x), y), see if we can merge this into an
  // integer add followed by a sext.
  if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) {
    // (add (sext x), cst) --> (sext (add x, cst'))
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
      if (LHSConv->hasOneUse()) {
        Constant *CI =
            ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
        if (ConstantExpr::getSExt(CI, Ty) == RHSC &&
            willNotOverflowSignedAdd(LHSConv->getOperand(0), CI, I)) {
          // Insert the new, smaller add.
          Value *NewAdd =
              Builder.CreateNSWAdd(LHSConv->getOperand(0), CI, "addconv");
          return new SExtInst(NewAdd, Ty);
        }
      }
    }

    // (add (sext x), (sext y)) --> (sext (add int x, y))
    if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of sexts), and if the
      // integer add will not overflow.
      if (LHSConv->getOperand(0)->getType() ==
              RHSConv->getOperand(0)->getType() &&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          willNotOverflowSignedAdd(LHSConv->getOperand(0),
                                   RHSConv->getOperand(0), I)) {
        // Insert the new integer add.
        Value *NewAdd = Builder.CreateNSWAdd(LHSConv->getOperand(0),
                                             RHSConv->getOperand(0), "addconv");
        return new SExtInst(NewAdd, Ty);
      }
    }
  }

  // Check for (add (zext x), y), see if we can merge this into an
  // integer add followed by a zext.
  if (auto *LHSConv = dyn_cast<ZExtInst>(LHS)) {
    // (add (zext x), cst) --> (zext (add x, cst'))
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
      if (LHSConv->hasOneUse()) {
        Constant *CI =
            ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
        if (ConstantExpr::getZExt(CI, Ty) == RHSC &&
            willNotOverflowUnsignedAdd(LHSConv->getOperand(0), CI, I)) {
          // Insert the new, smaller add.
          Value *NewAdd =
              Builder.CreateNUWAdd(LHSConv->getOperand(0), CI, "addconv");
          return new ZExtInst(NewAdd, Ty);
        }
      }
    }

    // (add (zext x), (zext y)) --> (zext (add int x, y))
    if (auto *RHSConv = dyn_cast<ZExtInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of zexts), and if the
      // integer add will not overflow.
      if (LHSConv->getOperand(0)->getType() ==
              RHSConv->getOperand(0)->getType() &&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          willNotOverflowUnsignedAdd(LHSConv->getOperand(0),
                                     RHSConv->getOperand(0), I)) {
        // Insert the new integer add.
        Value *NewAdd = Builder.CreateNUWAdd(
            LHSConv->getOperand(0), RHSConv->getOperand(0), "addconv");
        return new ZExtInst(NewAdd, Ty);
      }
    }
  }

  // (add (xor A, B) (and A, B)) --> (or A, B)
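  // A^B collects the bits set in exactly one of A and B, while A&B collects
  // the bits set in both; the two share no set bits, so their sum produces
  // no carries and equals A|B.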
  if (match(LHS, m_Xor(m_Value(A), m_Value(B))) &&
      match(RHS, m_c_And(m_Specific(A), m_Specific(B))))
    return BinaryOperator::CreateOr(A, B);

  // (add (and A, B) (xor A, B)) --> (or A, B)
  if (match(RHS, m_Xor(m_Value(A), m_Value(B))) &&
      match(LHS, m_c_And(m_Specific(A), m_Specific(B))))
    return BinaryOperator::CreateOr(A, B);

  // (add (or A, B) (and A, B)) --> (add A, B)
  if (match(LHS, m_Or(m_Value(A), m_Value(B))) &&
      match(RHS, m_c_And(m_Specific(A), m_Specific(B)))) {
    I.setOperand(0, A);
    I.setOperand(1, B);
    return &I;
  }

  // (add (and A, B) (or A, B)) --> (add A, B)
  if (match(RHS, m_Or(m_Value(A), m_Value(B))) &&
      match(LHS, m_c_And(m_Specific(A), m_Specific(B)))) {
    I.setOperand(0, A);
    I.setOperand(1, B);
    return &I;
  }

  // TODO(jingyue): Consider willNotOverflowSignedAdd and
  // willNotOverflowUnsignedAdd to reduce the number of invocations of
  // computeKnownBits.
  if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (isa<Constant>(RHS))
    if (Instruction *FoldedFAdd = foldOpWithConstantIntoOperand(I))
      return FoldedFAdd;

  // -A + B  -->  B - A
  // -A + -B --> -(A + B)
  if (Value *LHSV = dyn_castFNegVal(LHS)) {
    Instruction *RI = BinaryOperator::CreateFSub(RHS, LHSV);
    RI->copyFastMathFlags(&I);
    return RI;
  }

  // A + -B  -->  A - B
  if (!isa<Constant>(RHS))
    if (Value *V = dyn_castFNegVal(RHS)) {
      Instruction *RI = BinaryOperator::CreateFSub(LHS, V);
      RI->copyFastMathFlags(&I);
      return RI;
    }

  // Check for (fadd double (sitofp x), y), see if we can merge this into an
  // integer add followed by a promotion.
  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
    Value *LHSIntVal = LHSConv->getOperand(0);
    Type *FPType = LHSConv->getType();

    // TODO: This check is overly conservative. In many cases known bits
    // analysis can tell us that the result of the addition has fewer
    // significant bits than the integer type can hold.
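    // For example, an addition of i24-or-narrower values is always exactly
    // representable in a float (24-bit significand), and one of
    // i53-or-narrower values in a double (53-bit significand).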
    auto IsValidPromotion = [](Type *FTy, Type *ITy) {
      Type *FScalarTy = FTy->getScalarType();
      Type *IScalarTy = ITy->getScalarType();

      // Do we have enough bits in the significand to represent the result of
      // the integer addition?
      unsigned MaxRepresentableBits =
          APFloat::semanticsPrecision(FScalarTy->getFltSemantics());
      return IScalarTy->getIntegerBitWidth() <= MaxRepresentableBits;
    };

    // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
    // ... if the constant fits in the integer value. This is useful for things
    // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
    // requires a constant pool load, and generally allows the add to be better
    // instcombined.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
        Constant *CI =
            ConstantExpr::getFPToSI(CFP, LHSIntVal->getType());
        if (LHSConv->hasOneUse() &&
            ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
            willNotOverflowSignedAdd(LHSIntVal, CI, I)) {
          // Insert the new integer add.
          Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, CI, "addconv");
          return new SIToFPInst(NewAdd, I.getType());
        }
      }

    // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
    if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
      Value *RHSIntVal = RHSConv->getOperand(0);
      // It's enough to check LHS types only because we require int types to
      // be the same for this transform.
      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
        // Only do this if x/y have the same type, if at least one of them has
        // a single use (so we don't increase the number of int->fp
        // conversions), and if the integer add will not overflow.
        if (LHSIntVal->getType() == RHSIntVal->getType() &&
            (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
            willNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
          // Insert the new integer add.
          Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, RHSIntVal, "addconv");
          return new SIToFPInst(NewAdd, I.getType());
        }
      }
    }
  }

  // Handle special cases for FAdd with selects feeding the operation
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
    return replaceInstUsesWith(I, V);

  if (I.isFast()) {
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return Changed ? &I : nullptr;
}

/// Optimize pointer differences into the same array into a size. Consider:
///  &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
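/// e.g., with "%g = getelementptr i32, i32* %p, i64 10" (illustrative), the
/// difference (ptrtoint %g) - (ptrtoint %p) becomes the emitted GEP offset,
/// here the constant 40 (10 * 4 bytes).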
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
                                               Type *Ty) {
  // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
  // this.
  bool Swapped = false;
  GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;

  // For now we require one side to be the base pointer "A" or a constant
  // GEP derived from it.
  if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
    // (gep X, ...) - X
    if (LHSGEP->getOperand(0) == RHS) {
      GEP1 = LHSGEP;
      Swapped = false;
    } else if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
      // (gep X, ...) - (gep X, ...)
      if (LHSGEP->getOperand(0)->stripPointerCasts() ==
            RHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = RHSGEP;
        GEP1 = LHSGEP;
        Swapped = false;
      }
    }
  }

  if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
    // X - (gep X, ...)
    if (RHSGEP->getOperand(0) == LHS) {
      GEP1 = RHSGEP;
      Swapped = true;
    } else if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
      // (gep X, ...) - (gep X, ...)
      if (RHSGEP->getOperand(0)->stripPointerCasts() ==
            LHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = LHSGEP;
        GEP1 = RHSGEP;
        Swapped = true;
      }
    }
  }

  if (!GEP1)
    // No GEP found.
    return nullptr;

  if (GEP2) {
    // (gep X, ...) - (gep X, ...)
    //
    // Avoid duplicating the arithmetic if there is more than one non-constant
    // index between the two GEPs and either GEP has a non-constant index and
    // multiple users. With zero non-constant indices, the result is a constant
    // and there is no duplication. With one non-constant index, the result is
    // an add or sub with a constant, which is no larger than the original
    // code, and there's no duplicated arithmetic, even if either GEP has
    // multiple users. With more than one non-constant index combined, as long
    // as the GEP with at least one non-constant index doesn't have multiple
    // users, there is no duplication.
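    //
    // e.g., subtracting "gep %p, i64 %i, i64 %j" from "gep %p, i64 %k"
    // (illustrative) combines three non-constant indices; if either GEP also
    // had other users, the index arithmetic emitted here would be duplicated.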
    unsigned NumNonConstantIndices1 = GEP1->countNonConstantIndices();
    unsigned NumNonConstantIndices2 = GEP2->countNonConstantIndices();
    if (NumNonConstantIndices1 + NumNonConstantIndices2 > 1 &&
        ((NumNonConstantIndices1 > 0 && !GEP1->hasOneUse()) ||
         (NumNonConstantIndices2 > 0 && !GEP2->hasOneUse()))) {
      return nullptr;
    }
  }

  // Emit the offset of the GEP as an intptr_t.
  Value *Result = EmitGEPOffset(GEP1);

  // If we had a constant expression GEP on the other side offsetting the
  // pointer, subtract it from the offset we have.
  if (GEP2) {
    Value *Offset = EmitGEPOffset(GEP2);
    Result = Builder.CreateSub(Result, Offset);
  }

  // If we have p - gep(p, ...) then we have to negate the result.
  if (Swapped)
    Result = Builder.CreateNeg(Result, "diff.neg");

  return Builder.CreateIntCast(Result, Ty, true);
}

Instruction *InstCombiner::visitSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V =
          SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                          SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // (A*B)-(A*C) -> A*(B-C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // If this is a 'B = x-(-A)', change to B = x+A.
  if (Value *V = dyn_castNegVal(Op1)) {
    BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);

    if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
      assert(BO->getOpcode() == Instruction::Sub &&
             "Expected a subtraction operator!");
      if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    } else {
      if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    }

    return Res;
  }

  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(Op0, Op1);

  // Replace (-1 - A) with (~A).
  if (match(Op0, m_AllOnes()))
    return BinaryOperator::CreateNot(Op1);

  if (Constant *C = dyn_cast<Constant>(Op0)) {
    Value *X;

    // C - zext(bool) -> bool ? C - 1 : C
    if (match(Op1, m_ZExt(m_Value(X))) &&
        X->getType()->getScalarSizeInBits() == 1)
      return SelectInst::Create(X, SubOne(C), C);

    // C - ~X == X + (1+C)
    if (match(Op1, m_Not(m_Value(X))))
      return BinaryOperator::CreateAdd(X, AddOne(C));

    // Try to fold constant sub into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

    // Try to fold constant sub into PHI values.
    if (PHINode *PN = dyn_cast<PHINode>(Op1))
      if (Instruction *R = foldOpIntoPhi(I, PN))
        return R;

    // C-(X+C2) --> (C-C2)-X
    Constant *C2;
    if (match(Op1, m_Add(m_Value(X), m_Constant(C2))))
      return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);

    // Fold (sub 0, (zext bool to B)) --> (sext bool to B)
    if (C->isNullValue() && match(Op1, m_ZExt(m_Value(X))))
      if (X->getType()->isIntOrIntVectorTy(1))
        return CastInst::CreateSExtOrBitCast(X, Op1->getType());

    // Fold (sub 0, (sext bool to B)) --> (zext bool to B)
    if (C->isNullValue() && match(Op1, m_SExt(m_Value(X))))
      if (X->getType()->isIntOrIntVectorTy(1))
        return CastInst::CreateZExtOrBitCast(X, Op1->getType());
  }

  const APInt *Op0C;
  if (match(Op0, m_APInt(Op0C))) {
    unsigned BitWidth = I.getType()->getScalarSizeInBits();

    // -(X >>u 31) -> (X >>s 31)
    // -(X >>s 31) -> (X >>u 31)
    if (Op0C->isNullValue()) {
      Value *X;
      const APInt *ShAmt;
      if (match(Op1, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
          *ShAmt == BitWidth - 1) {
        Value *ShAmtOp = cast<Instruction>(Op1)->getOperand(1);
        return BinaryOperator::CreateAShr(X, ShAmtOp);
      }
      if (match(Op1, m_AShr(m_Value(X), m_APInt(ShAmt))) &&
          *ShAmt == BitWidth - 1) {
        Value *ShAmtOp = cast<Instruction>(Op1)->getOperand(1);
        return BinaryOperator::CreateLShr(X, ShAmtOp);
      }
    }

    // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
    // zero.
    if (Op0C->isMask()) {
      KnownBits RHSKnown = computeKnownBits(Op1, 0, &I);
      if ((*Op0C | RHSKnown.Zero).isAllOnesValue())
        return BinaryOperator::CreateXor(Op1, Op0);
    }
  }

  {
    Value *Y;

    // X-(X+Y) == -Y    X-(Y+X) == -Y
    if (match(Op1, m_c_Add(m_Specific(Op0), m_Value(Y))))
      return BinaryOperator::CreateNeg(Y);

    // (X-Y)-X == -Y
    if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
      return BinaryOperator::CreateNeg(Y);
  }

  // (sub (or A, B), (xor A, B)) --> (and A, B)
  {
    Value *A, *B;
    if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);
  }

  {
    Value *Y;

    // ((X | Y) - X) --> (~X & Y)
    if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
      return BinaryOperator::CreateAnd(
          Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
  }

  if (Op1->hasOneUse()) {
    Value *X = nullptr, *Y = nullptr, *Z = nullptr;
    Constant *C = nullptr;

    // (X - (Y - Z))  -->  (X + (Z - Y)).
    if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
      return BinaryOperator::CreateAdd(Op0,
                                      Builder.CreateSub(Z, Y, Op1->getName()));

    // (X - (X & Y))  -->  (X & ~Y)
    if (match(Op1, m_c_And(m_Value(Y), m_Specific(Op0))))
      return BinaryOperator::CreateAnd(Op0,
                                  Builder.CreateNot(Y, Y->getName() + ".not"));

    // 0 - (X sdiv C)  -> (X sdiv -C)  provided the negation doesn't overflow.
    if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) && match(Op0, m_Zero()) &&
        C->isNotMinSignedValue() && !C->isOneValue())
      return BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C));

    // 0 - (X << Y)  -> (-X << Y)  when X is freely negatable.
    if (match(Op1, m_Shl(m_Value(X), m_Value(Y))) && match(Op0, m_Zero()))
      if (Value *XNeg = dyn_castNegVal(X))
        return BinaryOperator::CreateShl(XNeg, Y);

    // Subtracting -1/0 is the same as adding 1/0:
    // sub [nsw] Op0, sext(bool Y) -> add [nsw] Op0, zext(bool Y)
    // 'nuw' is dropped in favor of the canonical form.
    if (match(Op1, m_SExt(m_Value(Y))) &&
        Y->getType()->getScalarSizeInBits() == 1) {
      Value *Zext = Builder.CreateZExt(Y, I.getType());
      BinaryOperator *Add = BinaryOperator::CreateAdd(Op0, Zext);
      Add->setHasNoSignedWrap(I.hasNoSignedWrap());
      return Add;
    }

    // X - A*-B -> X + A*B
    // X - -A*B -> X + A*B
    Value *A, *B;
    Constant *CI;
    if (match(Op1, m_c_Mul(m_Value(A), m_Neg(m_Value(B)))))
      return BinaryOperator::CreateAdd(Op0, Builder.CreateMul(A, B));

    // X - A*CI -> X + A*-CI
    // No need to handle commuted multiply because multiply handling will
    // ensure the constant will be moved to the right hand side.
    if (match(Op1, m_Mul(m_Value(A), m_Constant(CI)))) {
      Value *NewMul = Builder.CreateMul(A, ConstantExpr::getNeg(CI));
      return BinaryOperator::CreateAdd(Op0, NewMul);
    }
  }

  // Optimize pointer differences into the same array into a size. Consider:
  //  &A[10] - &A[0]: we should compile this to "10".
  Value *LHSOp, *RHSOp;
  if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
      match(Op1, m_PtrToInt(m_Value(RHSOp))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
      return replaceInstUsesWith(I, Res);

  // trunc(p)-trunc(q) -> trunc(p-q)
  if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
      match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
      return replaceInstUsesWith(I, Res);

  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // fsub nsz 0, X ==> fsub nsz -0.0, X
  if (I.getFastMathFlags().noSignedZeros() && match(Op0, m_Zero())) {
    // Subtraction from -0.0 is the canonical form of fneg.
    Instruction *NewI = BinaryOperator::CreateFNeg(Op1);
    NewI->copyFastMathFlags(&I);
    return NewI;
  }

  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *NV = FoldOpIntoSelect(I, SI))
        return NV;

  // If this is a 'B = x-(-A)', change to B = x+A, potentially looking
  // through FP extensions/truncations along the way.
  if (Value *V = dyn_castFNegVal(Op1)) {
    Instruction *NewI = BinaryOperator::CreateFAdd(Op0, V);
    NewI->copyFastMathFlags(&I);
    return NewI;
  }

  if (FPTruncInst *FPTI = dyn_cast<FPTruncInst>(Op1)) {
    if (Value *V = dyn_castFNegVal(FPTI->getOperand(0))) {
      Value *NewTrunc = Builder.CreateFPTrunc(V, I.getType());
      Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewTrunc);
      NewI->copyFastMathFlags(&I);
      return NewI;
    }
  } else if (FPExtInst *FPEI = dyn_cast<FPExtInst>(Op1)) {
    if (Value *V = dyn_castFNegVal(FPEI->getOperand(0))) {
      Value *NewExt = Builder.CreateFPExt(V, I.getType());
      Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewExt);
      NewI->copyFastMathFlags(&I);
      return NewI;
    }
  }

  // Handle special cases for FSub with selects feeding the operation
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);

  if (I.isFast()) {
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return nullptr;
}