//===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for add, fadd, sub, and fsub.
//
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"
namespace {

  /// Class representing the coefficient of a floating-point addend.
  /// This class needs to be highly efficient; this is especially true of
  /// the constructor. As I write this comment, the cost of the default
  /// constructor is merely a 4-byte zero store (assuming the compiler is
  /// able to perform write-merging).
  class FAddendCoef {
  public:
    // The constructor has to initialize an APFloat, which is unnecessary
    // for most addends, whose coefficient is either 1 or -1. So the
    // constructor is expensive. In order to avoid this cost, we should
    // reuse instances whenever possible. The pre-created instances
    // FAddCombine::Add[0-5] embody this idea.
    FAddendCoef() : IsFp(false), BufHasFpVal(false), IntVal(0) {}
    ~FAddendCoef();

    void set(short C) {
      assert(!insaneIntVal(C) && "Insane coefficient");
      IsFp = false; IntVal = C;
    }
    void set(const APFloat& C);

    void negate();

    bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
    Value *getValue(Type *) const;
    // If possible, don't define operator+/operator- etc., because these
    // operators inevitably call FAddendCoef's constructor, which is not cheap.
    void operator=(const FAddendCoef &A);
    void operator+=(const FAddendCoef &A);
    void operator*=(const FAddendCoef &S);

    bool isOne() const { return isInt() && IntVal == 1; }
    bool isTwo() const { return isInt() && IntVal == 2; }
    bool isMinusOne() const { return isInt() && IntVal == -1; }
    bool isMinusTwo() const { return isInt() && IntVal == -2; }
  private:
    bool insaneIntVal(int V) { return V > 4 || V < -4; }
    APFloat *getFpValPtr()
      { return reinterpret_cast<APFloat*>(&FpValBuf.buffer[0]); }
    const APFloat *getFpValPtr() const
      { return reinterpret_cast<const APFloat*>(&FpValBuf.buffer[0]); }
    const APFloat &getFpVal() const {
      assert(IsFp && BufHasFpVal && "Incorrect state");
      return *getFpValPtr();
    }

    APFloat &getFpVal() {
      assert(IsFp && BufHasFpVal && "Incorrect state");
      return *getFpValPtr();
    }

    bool isInt() const { return !IsFp; }
    // If the coefficient is represented by an integer, promote it to a
    // floating-point value.
    void convertToFpType(const fltSemantics &Sem);

    // Construct an APFloat from a signed integer.
    // TODO: We should get rid of this function when APFloat can be constructed
    // from a *SIGNED* integer.
    APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);
    bool IsFp;

    // True iff FpValBuf contains an instance of APFloat.
    bool BufHasFpVal;

    // The integer coefficient of an individual addend is either 1 or -1,
    // and we try to simplify at most 4 addends drawn from at most two
    // neighboring instructions, so the range of <IntVal> falls in [-4, 4].
    // APInt would be overkill for this purpose.
    short IntVal;

    AlignedCharArrayUnion<APFloat> FpValBuf;
  };
  /// FAddend is used to represent a floating-point addend. An addend is
  /// represented as <C, V>, where V is a symbolic value, and C is a
  /// constant coefficient. A constant addend is represented as <C, 0>.
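  /// For example (illustrative): the expression "2.5 * X" is represented
  /// as <2.5, X>, while the constant 3.0 is represented as <3.0, 0>.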
  class FAddend {
  public:
    FAddend() : Val(nullptr) {}

    Value *getSymVal() const { return Val; }
    const FAddendCoef &getCoef() const { return Coeff; }

    bool isConstant() const { return Val == nullptr; }
    bool isZero() const { return Coeff.isZero(); }
    void set(short Coefficient, Value *V) {
      Coeff.set(Coefficient);
      Val = V;
    }
    void set(const APFloat &Coefficient, Value *V) {
      Coeff.set(Coefficient);
      Val = V;
    }
    void set(const ConstantFP *Coefficient, Value *V) {
      Coeff.set(Coefficient->getValueAPF());
      Val = V;
    }

    void negate() { Coeff.negate(); }
    /// Drill down the U-D chain one step to find the definition of V, and
    /// try to break the definition into one or two addends.
    static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);

    /// Similar to FAddend::drillValueDownOneStep() except that the value
    /// being split is the addend itself.
    unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;
    void operator+=(const FAddend &T) {
      assert((Val == T.Val) && "Symbolic-values disagree");
      Coeff += T.Coeff;
    }

  private:
    void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }

    // This addend has the value of "Coeff * Val".
    Value *Val;
    FAddendCoef Coeff;
  };
  /// FAddCombine is the class for optimizing an unsafe fadd/fsub along
  /// with at most two of its neighboring instructions.
  class FAddCombine {
  public:
    FAddCombine(InstCombiner::BuilderTy *B) : Builder(B), Instr(nullptr) {}

    Value *simplify(Instruction *FAdd);
  private:
    typedef SmallVector<const FAddend*, 4> AddendVect;

    Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);

    Value *performFactorization(Instruction *I);

    /// Convert the given addend to a Value.
    Value *createAddendVal(const FAddend &A, bool& NeedNeg);

    /// Return the number of instructions needed to emit the N-ary addition.
    unsigned calcInstrNumber(const AddendVect& Vect);

    Value *createFSub(Value *Opnd0, Value *Opnd1);
    Value *createFAdd(Value *Opnd0, Value *Opnd1);
    Value *createFMul(Value *Opnd0, Value *Opnd1);
    Value *createFDiv(Value *Opnd0, Value *Opnd1);
    Value *createFNeg(Value *V);
    Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
    void createInstPostProc(Instruction *NewInst, bool NoNumber = false);
    InstCombiner::BuilderTy *Builder;
    Instruction *Instr;

    // Debugging aids are clustered here.
#ifndef NDEBUG
    unsigned CreateInstrNum;
    void initCreateInstNum() { CreateInstrNum = 0; }
    void incCreateInstNum() { CreateInstrNum++; }
#else
    void initCreateInstNum() {}
    void incCreateInstNum() {}
#endif
  };

} // anonymous namespace
//===----------------------------------------------------------------------===//
//
// Implementation of
//    {FAddendCoef, FAddend, FAddition, FAddCombine}.
//
//===----------------------------------------------------------------------===//
FAddendCoef::~FAddendCoef() {
  if (BufHasFpVal)
    getFpValPtr()->~APFloat();
}
void FAddendCoef::set(const APFloat& C) {
  APFloat *P = getFpValPtr();

  if (isInt() || !BufHasFpVal) {
    // As the buffer is a meaningless byte stream, we cannot call
    // APFloat::operator=().
    new(P) APFloat(C);
  } else
    *P = C;

  IsFp = BufHasFpVal = true;
}
void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
  if (!isInt())
    return;

  APFloat *P = getFpValPtr();
  if (IntVal > 0)
    new(P) APFloat(Sem, IntVal);
  else {
    new(P) APFloat(Sem, 0 - IntVal);
    P->changeSign();
  }
  IsFp = BufHasFpVal = true;
}
APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
  if (Val >= 0)
    return APFloat(Sem, Val);

  APFloat T(Sem, 0 - Val);
  T.changeSign();

  return T;
}
void FAddendCoef::operator=(const FAddendCoef &That) {
  if (That.isInt())
    set(That.IntVal);
  else
    set(That.getFpVal());
}
void FAddendCoef::operator+=(const FAddendCoef &That) {
  enum APFloat::roundingMode RndMode = APFloat::rmNearestTiesToEven;
  if (isInt() == That.isInt()) {
    if (isInt())
      IntVal += That.IntVal;
    else
      getFpVal().add(That.getFpVal(), RndMode);
    return;
  }

  if (isInt()) {
    const APFloat &T = That.getFpVal();
    convertToFpType(T.getSemantics());
    getFpVal().add(T, RndMode);
    return;
  }

  APFloat &T = getFpVal();
  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
}
void FAddendCoef::operator*=(const FAddendCoef &That) {
  if (That.isOne())
    return;

  if (That.isMinusOne()) {
    negate();
    return;
  }

  if (isInt() && That.isInt()) {
    int Res = IntVal * (int)That.IntVal;
    assert(!insaneIntVal(Res) && "Insane int value");
    IntVal = Res;
    return;
  }

  const fltSemantics &Semantic =
    isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();

  if (isInt())
    convertToFpType(Semantic);
  APFloat &F0 = getFpVal();

  if (That.isInt())
    F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
                APFloat::rmNearestTiesToEven);
  else
    F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
}
void FAddendCoef::negate() {
  if (isInt())
    IntVal = 0 - IntVal;
  else
    getFpVal().changeSign();
}
Value *FAddendCoef::getValue(Type *Ty) const {
  return isInt() ?
    ConstantFP::get(Ty, float(IntVal)) :
    ConstantFP::get(Ty->getContext(), getFpVal());
}
// The definition of <Val>     Addends
// =========================================
//  A + B                     <1, A>, <1, B>
//  A - B                     <1, A>, <-1, B>
//  A + C                     <1, A>, <C, NULL>
//  0 +/- 0                   <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is constant
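//
// For example (illustrative): drilling into "fsub double %a, %b" yields the
// two addends <1, %a> and <-1, %b>, per the "A - B" row above.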
unsigned FAddend::drillValueDownOneStep
  (Value *Val, FAddend &Addend0, FAddend &Addend1) {
  Instruction *I = nullptr;
  if (!Val || !(I = dyn_cast<Instruction>(Val)))
    return 0;

  unsigned Opcode = I->getOpcode();

  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
    ConstantFP *C0, *C1;
    Value *Opnd0 = I->getOperand(0);
    Value *Opnd1 = I->getOperand(1);
    if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
      Opnd0 = nullptr;

    if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
      Opnd1 = nullptr;

    if (Opnd0) {
      if (!C0)
        Addend0.set(1, Opnd0);
      else
        Addend0.set(C0, nullptr);
    }

    if (Opnd1) {
      FAddend &Addend = Opnd0 ? Addend1 : Addend0;
      if (!C1)
        Addend.set(1, Opnd1);
      else
        Addend.set(C1, nullptr);
      if (Opcode == Instruction::FSub)
        Addend.negate();
    }

    if (Opnd0 || Opnd1)
      return Opnd0 && Opnd1 ? 2 : 1;

    // Both operands are zero. Weird!
    Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
    return 1;
  }

  if (I->getOpcode() == Instruction::FMul) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
      Addend0.set(C, V1);
      return 1;
    }

    if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
      Addend0.set(C, V0);
      return 1;
    }
  }

  return 0;
}
// Try to break *this* addend into two addends. e.g., suppose this addend is
// <2.3, V> and V = X + Y; by calling this function we obtain two addends,
// namely <2.3, X> and <2.3, Y>.
unsigned FAddend::drillAddendDownOneStep
  (FAddend &Addend0, FAddend &Addend1) const {
  if (isConstant())
    return 0;

  unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
  if (!BreakNum || Coeff.isOne())
    return BreakNum;

  Addend0.Scale(Coeff);

  if (BreakNum == 2)
    Addend1.Scale(Coeff);

  return BreakNum;
}
// Try to perform the following optimization on the input instruction I.
// Return the simplified expression if the transformation succeeds; otherwise,
// return nullptr.
//
//   Instruction "I" is                Simplified into
// -------------------------------------------------------
//   (x * y) +/- (x * z)               x * (y +/- z)
//   (y / x) +/- (z / x)               (y +/- z) / x
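//
// For example (illustrative IR, assuming fast-math is in effect):
//    %m1 = fmul fast double %x, %y
//    %m2 = fmul fast double %x, %z
//    %r  = fadd fast double %m1, %m2
// is factored into:
//    %s = fadd fast double %y, %z
//    %r = fmul fast double %x, %s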
Value *FAddCombine::performFactorization(Instruction *I) {
  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  Instruction *I0 = dyn_cast<Instruction>(I->getOperand(0));
  Instruction *I1 = dyn_cast<Instruction>(I->getOperand(1));

  if (!I0 || !I1 || I0->getOpcode() != I1->getOpcode())
    return nullptr;

  bool isMpy = false;
  if (I0->getOpcode() == Instruction::FMul)
    isMpy = true;
  else if (I0->getOpcode() != Instruction::FDiv)
    return nullptr;

  Value *Opnd0_0 = I0->getOperand(0);
  Value *Opnd0_1 = I0->getOperand(1);
  Value *Opnd1_0 = I1->getOperand(0);
  Value *Opnd1_1 = I1->getOperand(1);
  //  Input Instr I        Factor   AddSub0  AddSub1
  //  ----------------------------------------------
  //  (x*y) +/- (x*z)        x        y        z
  //  (y/x) +/- (z/x)        x        y        z
  Value *Factor = nullptr;
  Value *AddSub0 = nullptr, *AddSub1 = nullptr;

  if (isMpy) {
    if (Opnd0_0 == Opnd1_0 || Opnd0_0 == Opnd1_1)
      Factor = Opnd0_0;
    else if (Opnd0_1 == Opnd1_0 || Opnd0_1 == Opnd1_1)
      Factor = Opnd0_1;

    if (Factor) {
      AddSub0 = (Factor == Opnd0_0) ? Opnd0_1 : Opnd0_0;
      AddSub1 = (Factor == Opnd1_0) ? Opnd1_1 : Opnd1_0;
    }
  } else if (Opnd0_1 == Opnd1_1) {
    // Factor out the common denominator.
    Factor = Opnd0_1;
    AddSub0 = Opnd0_0;
    AddSub1 = Opnd1_0;
  }

  if (!Factor)
    return nullptr;

  FastMathFlags Flags;
  Flags.setUnsafeAlgebra();
  if (I0) Flags &= I->getFastMathFlags();
  if (I1) Flags &= I->getFastMathFlags();
  // Create the expression "NewAddSub = AddSub0 +/- AddSub1".
  Value *NewAddSub = (I->getOpcode() == Instruction::FAdd) ?
                      createFAdd(AddSub0, AddSub1) :
                      createFSub(AddSub0, AddSub1);
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(NewAddSub)) {
    const APFloat &F = CFP->getValueAPF();
    if (!F.isNormal())
      return nullptr;
  } else if (Instruction *II = dyn_cast<Instruction>(NewAddSub))
    II->setFastMathFlags(Flags);

  if (isMpy) {
    Value *RI = createFMul(Factor, NewAddSub);
    if (Instruction *II = dyn_cast<Instruction>(RI))
      II->setFastMathFlags(Flags);
    return RI;
  }

  Value *RI = createFDiv(NewAddSub, Factor);
  if (Instruction *II = dyn_cast<Instruction>(RI))
    II->setFastMathFlags(Flags);
  return RI;
}
Value *FAddCombine::simplify(Instruction *I) {
  assert(I->hasUnsafeAlgebra() && "Should be in unsafe mode");

  // Currently we are not able to handle vector types.
  if (I->getType()->isVectorTy())
    return nullptr;

  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  // Save the instruction before calling other member functions.
  Instr = I;

  FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;

  unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);
  // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
  unsigned Opnd0_ExpNum = 0;
  unsigned Opnd1_ExpNum = 0;

  if (!Opnd0.isConstant())
    Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);

  // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
  if (OpndNum == 2 && !Opnd1.isConstant())
    Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);
  // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
  if (Opnd0_ExpNum && Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0_0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    // Compute the instruction quota. We should save at least one instruction.
    unsigned InstQuota = 0;

    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
                 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;

    if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
      return R;
  }
  if (OpndNum != 2) {
    // The input instruction is "I = 0.0 +/- V". If the "V" could be split
    // into two addends, say "V = X - Y", the instruction would have been
    // optimized into "I = Y - X" in the previous steps.
    const FAddendCoef &CE = Opnd0.getCoef();
    return CE.isOne() ? Opnd0.getSymVal() : nullptr;
  }
  // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
  if (Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
  if (Opnd0_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd1);
    AllOpnds.push_back(&Opnd0_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 6: Try factorization as the last resort.
  return performFactorization(I);
}
Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
  unsigned AddendNum = Addends.size();
  assert(AddendNum <= 4 && "Too many addends");

  // For saving intermediate results.
  unsigned NextTmpIdx = 0;
  FAddend TmpResult[3];

  // Points to the constant addend of the resulting simplified expression.
  // If the resulting expression has a constant addend, it is desirable for
  // that constant to reside at the top of the resulting expression tree;
  // placing constants close to the super-expression(s) may reveal further
  // optimization opportunities in the super-expression(s).
  const FAddend *ConstAdd = nullptr;

  // Simplified addends are placed in <SimpVect>.
  AddendVect SimpVect;
  // The outer loop works on one symbolic value at a time. Suppose the input
  // addends are: <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
  // The symbolic values will be processed in this order: x, y, z.
  for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {

    const FAddend *ThisAddend = Addends[SymIdx];
    if (!ThisAddend) {
      // This addend was processed before.
      continue;
    }

    Value *Val = ThisAddend->getSymVal();
    unsigned StartIdx = SimpVect.size();
    SimpVect.push_back(ThisAddend);
    // The inner loop collects addends sharing the same symbolic value, and
    // these addends will later be folded into a single addend. Following the
    // above example, if the symbolic value "y" is being processed, the inner
    // loop will collect the two addends "<b1, y>" and "<b2, y>". These two
    // addends will later be folded into "<b1+b2, y>".
    for (unsigned SameSymIdx = SymIdx + 1;
         SameSymIdx < AddendNum; SameSymIdx++) {
      const FAddend *T = Addends[SameSymIdx];
      if (T && T->getSymVal() == Val) {
        // Set to null so that the next iteration of the outer loop will not
        // process this addend again.
        Addends[SameSymIdx] = nullptr;
        SimpVect.push_back(T);
      }
    }
    // If multiple addends share the same symbolic value, fold them together.
    if (StartIdx + 1 != SimpVect.size()) {
      FAddend &R = TmpResult[NextTmpIdx ++];
      R = *SimpVect[StartIdx];
      for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
        R += *SimpVect[Idx];

      // Pop all addends being folded and push the resulting folded addend.
      SimpVect.resize(StartIdx);
      if (Val) {
        if (!R.isZero())
          SimpVect.push_back(&R);
      } else {
        // Don't push the constant addend at this time. It will be the last
        // element pushed into <SimpVect>.
        ConstAdd = &R;
      }
    }
  }
  assert((NextTmpIdx <= array_lengthof(TmpResult) + 1) &&
         "out-of-bound access");

  if (ConstAdd)
    SimpVect.push_back(ConstAdd);
  Value *Result;
  if (!SimpVect.empty())
    Result = createNaryFAdd(SimpVect, InstrQuota);
  else {
    // The addition is folded to 0.0.
    Result = ConstantFP::get(Instr->getType(), 0.0);
  }

  return Result;
}
Value *FAddCombine::createNaryFAdd
  (const AddendVect &Opnds, unsigned InstrQuota) {
  assert(!Opnds.empty() && "Expect at least one addend");

  // Step 1: Check if the # of instructions needed exceeds the quota.
  unsigned InstrNeeded = calcInstrNumber(Opnds);
  if (InstrNeeded > InstrQuota)
    return nullptr;

  initCreateInstNum();
  // Step 2: Emit the N-ary addition.
  // Note that at most three instructions are involved in Fadd-InstCombine:
  // the addition in question and at most two neighboring instructions.
  // The resulting optimized addition should have at least one fewer
  // instruction than the original addition expression tree. This implies
  // that the resulting N-ary addition has at most two instructions, and we
  // don't need to worry about tree-height when constructing the N-ary
  // addition.
  Value *LastVal = nullptr;
  bool LastValNeedNeg = false;

  // Iterate the addends, creating fadd/fsub using two adjacent addends.
  for (const FAddend *Opnd : Opnds) {
    bool NeedNeg;
    Value *V = createAddendVal(*Opnd, NeedNeg);
    if (!LastVal) {
      LastVal = V;
      LastValNeedNeg = NeedNeg;
      continue;
    }

    if (LastValNeedNeg == NeedNeg) {
      LastVal = createFAdd(LastVal, V);
      continue;
    }

    if (LastValNeedNeg)
      LastVal = createFSub(V, LastVal);
    else
      LastVal = createFSub(LastVal, V);

    LastValNeedNeg = false;
  }

  if (LastValNeedNeg) {
    LastVal = createFNeg(LastVal);
  }

#ifndef NDEBUG
  assert(CreateInstrNum == InstrNeeded &&
         "Inconsistent instruction numbers");
#endif

  return LastVal;
}
Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder->CreateFSub(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFNeg(Value *V) {
  Value *Zero = cast<Value>(ConstantFP::getZeroValueForNegation(V->getType()));
  Value *NewV = createFSub(Zero, V);
  if (Instruction *I = dyn_cast<Instruction>(NewV))
    createInstPostProc(I, true); // fneg's don't receive instruction numbers.
  return NewV;
}

Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder->CreateFAdd(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder->CreateFMul(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFDiv(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder->CreateFDiv(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}
void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
  NewInstr->setDebugLoc(Instr->getDebugLoc());

  // Keep track of the number of instructions created.
  if (!NoNumber)
    incCreateInstNum();

  // Propagate fast-math flags.
  NewInstr->setFastMathFlags(Instr->getFastMathFlags());
}
// Return the number of instructions needed to emit the N-ary addition.
// NOTE: Keep this function in sync with createAddendVal().
unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
  unsigned OpndNum = Opnds.size();
  unsigned InstrNeeded = OpndNum - 1;

  // The number of addends in the form of "(-1)*x".
  unsigned NegOpndNum = 0;

  // Adjust the number of instructions needed to emit the N-ary add.
  for (const FAddend *Opnd : Opnds) {
    if (Opnd->isConstant())
      continue;

    const FAddendCoef &CE = Opnd->getCoef();
    if (CE.isMinusOne() || CE.isMinusTwo())
      NegOpndNum++;

    // Let the addend be "c * x". If "c == +/-1", the value of the addend
    // is immediately available; otherwise, it needs exactly one instruction
    // to evaluate the value.
    if (!CE.isMinusOne() && !CE.isOne())
      InstrNeeded++;
  }
  if (NegOpndNum == OpndNum)
    InstrNeeded++;
  return InstrNeeded;
}
// Input Addend        Value           NeedNeg(output)
// ================================================================
// Constant C          C               false
// <+/-1, V>           V               coefficient is -1
// <2/-2, V>           "fadd V, V"     coefficient is -2
// <C, V>              "fmul V, C"     false
//
// NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
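//
// For example (illustrative): the addend <-2, V> is emitted as "fadd V, V"
// with NeedNeg set to true; the pending negation is then folded into the
// surrounding fadd/fsub chain by createNaryFAdd().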
Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
  const FAddendCoef &Coeff = Opnd.getCoef();

  if (Opnd.isConstant()) {
    NeedNeg = false;
    return Coeff.getValue(Instr->getType());
  }

  Value *OpndVal = Opnd.getSymVal();

  if (Coeff.isMinusOne() || Coeff.isOne()) {
    NeedNeg = Coeff.isMinusOne();
    return OpndVal;
  }

  if (Coeff.isTwo() || Coeff.isMinusTwo()) {
    NeedNeg = Coeff.isMinusTwo();
    return createFAdd(OpndVal, OpndVal);
  }

  NeedNeg = false;
  return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}
// If one of the operands only has one non-zero bit, and if the other
// operand has a known-zero bit in a more significant place than it (not
// including the sign bit), the ripple may go up to and fill the zero, but
// won't change the sign. For example, (X & ~4) + 1.
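//
// To spell out that example (illustrative): in "(X & ~4) + 1" bit 2 of the
// left operand is known zero, so a carry out of bit 0 can ripple at most
// into bit 2, where it is absorbed; it can never reach the sign bit.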
static bool checkRippleForAdd(const APInt &Op0KnownZero,
                              const APInt &Op1KnownZero) {
  APInt Op1MaybeOne = ~Op1KnownZero;
  // Make sure that one of the operands has at most one bit set to 1.
  if (Op1MaybeOne.countPopulation() != 1)
    return false;

  // Find the most significant known 0 other than the sign bit.
  int BitWidth = Op0KnownZero.getBitWidth();
  APInt Op0KnownZeroTemp(Op0KnownZero);
  Op0KnownZeroTemp.clearBit(BitWidth - 1);
  int Op0ZeroPosition = BitWidth - Op0KnownZeroTemp.countLeadingZeros() - 1;

  int Op1OnePosition = BitWidth - Op1MaybeOne.countLeadingZeros() - 1;
  assert(Op1OnePosition >= 0);

  // This also covers the case of no known zero, since in that case
  // Op0ZeroPosition is -1.
  return Op0ZeroPosition >= Op1OnePosition;
}
/// Return true if we can prove that:
///    (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS))
/// This basically requires proving that the add in the original type would not
/// overflow to change the sign bit or have a carry out.
bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS,
                                            Instruction &CxtI) {
  // There are different heuristics we can use for this. Here are some simple
  // ones.

  // If LHS and RHS each have at least two sign bits, the addition will look
  // like
  //
  //   XX.....
  // + YY.....
  //
  // If the carry into the most significant position is 0, X and Y can't both
  // be 1 and therefore the carry out of the addition is also 0.
  //
  // If the carry into the most significant position is 1, X and Y can't both
  // be 0 and therefore the carry out of the addition is also 1.
  //
  // Since the carry into the most significant position is always equal to
  // the carry out of the addition, there is no signed overflow.
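  //
  // Numerically (illustrative, i8): having at least two sign bits means each
  // value lies in [-64, 63], so the sum lies in [-128, 126] and still fits
  // in an i8 without signed wrap.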
  if (ComputeNumSignBits(LHS, 0, &CxtI) > 1 &&
      ComputeNumSignBits(RHS, 0, &CxtI) > 1)
    return true;

  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
  APInt LHSKnownZero(BitWidth, 0);
  APInt LHSKnownOne(BitWidth, 0);
  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, &CxtI);

  APInt RHSKnownZero(BitWidth, 0);
  APInt RHSKnownOne(BitWidth, 0);
  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, &CxtI);

  // Addition of two 2's complement numbers having opposite signs will never
  // overflow.
  if ((LHSKnownOne[BitWidth - 1] && RHSKnownZero[BitWidth - 1]) ||
      (LHSKnownZero[BitWidth - 1] && RHSKnownOne[BitWidth - 1]))
    return true;

  // Check if the carry bit of the addition will not cause overflow.
  if (checkRippleForAdd(LHSKnownZero, RHSKnownZero))
    return true;
  if (checkRippleForAdd(RHSKnownZero, LHSKnownZero))
    return true;

  return false;
}
/// \brief Return true if we can prove that:
///    (sub LHS, RHS) === (sub nsw LHS, RHS)
/// This basically requires proving that the sub in the original type would not
/// overflow to change the sign bit or have a carry out.
/// TODO: Handle this for Vectors.
bool InstCombiner::WillNotOverflowSignedSub(Value *LHS, Value *RHS,
                                            Instruction &CxtI) {
  // If LHS and RHS each have at least two sign bits, the subtraction
  // cannot overflow.
  if (ComputeNumSignBits(LHS, 0, &CxtI) > 1 &&
      ComputeNumSignBits(RHS, 0, &CxtI) > 1)
    return true;
  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
  APInt LHSKnownZero(BitWidth, 0);
  APInt LHSKnownOne(BitWidth, 0);
  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, &CxtI);

  APInt RHSKnownZero(BitWidth, 0);
  APInt RHSKnownOne(BitWidth, 0);
  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, &CxtI);

  // Subtraction of two 2's complement numbers having identical signs will
  // never overflow.
  if ((LHSKnownOne[BitWidth - 1] && RHSKnownOne[BitWidth - 1]) ||
      (LHSKnownZero[BitWidth - 1] && RHSKnownZero[BitWidth - 1]))
    return true;

  // TODO: implement logic similar to checkRippleForAdd.
  return false;
}
/// \brief Return true if we can prove that:
///    (sub LHS, RHS) === (sub nuw LHS, RHS)
bool InstCombiner::WillNotOverflowUnsignedSub(Value *LHS, Value *RHS,
                                              Instruction &CxtI) {
  // If the LHS is negative and the RHS is non-negative, no unsigned wrap.
  bool LHSKnownNonNegative, LHSKnownNegative;
  bool RHSKnownNonNegative, RHSKnownNegative;
  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, /*Depth=*/0,
                 &CxtI);
  ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, /*Depth=*/0,
                 &CxtI);
  if (LHSKnownNegative && RHSKnownNonNegative)
    return true;

  return false;
}
// Checks if any operand is negative and we can convert add to sub.
// This function checks for the following negative patterns:
//   ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C))
//   ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
//   XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
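//
// For example (illustrative, i8 with C = 15): ((%z & 15) ^ 15) + 1 computes
// -(%z | -16), so "add (add (xor (and %z, 15), 15), 1), %r" can be rewritten
// as "sub %r, (or %z, -16)".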
static Value *checkForNegativeOperand(BinaryOperator &I,
                                      InstCombiner::BuilderTy *Builder) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  // This function creates 2 instructions to replace ADD; we need at least one
  // of LHS or RHS to have a single use to ensure a benefit from the transform.
  if (!LHS->hasOneUse() && !RHS->hasOneUse())
    return nullptr;
  Value *X = nullptr, *Y = nullptr, *Z = nullptr;
  const APInt *C1 = nullptr, *C2 = nullptr;

  // If ONE is on the other side, swap.
  if (match(RHS, m_Add(m_Value(X), m_One())))
    std::swap(LHS, RHS);

  if (match(LHS, m_Add(m_Value(X), m_One()))) {
    // If XOR is on the other side, swap.
    if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
      std::swap(X, RHS);

    if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
      // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
      // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
      if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
        Value *NewAnd = Builder->CreateAnd(Z, *C1);
        return Builder->CreateSub(RHS, NewAnd, "sub");
      } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
        // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
        // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
        Value *NewOr = Builder->CreateOr(Z, ~(*C1));
        return Builder->CreateSub(RHS, NewOr, "sub");
      }
    }
  }
  // Restore LHS and RHS.
  LHS = I.getOperand(0);
  RHS = I.getOperand(1);

  // If XOR is on the other side, swap.
  if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
    std::swap(LHS, RHS);

  // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
  // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
  if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
    if (C1->countTrailingZeros() == 0)
      if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
        Value *NewOr = Builder->CreateOr(Z, ~(*C2));
        return Builder->CreateSub(RHS, NewOr, "sub");
      }

  return nullptr;
}
Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
                                 I.hasNoUnsignedWrap(), DL, &TLI, &DT, &AC))
    return replaceInstUsesWith(I, V);

  // (A*B)+(A*C) -> A*(B+C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);
  const APInt *RHSC;
  if (match(RHS, m_APInt(RHSC))) {
    if (RHSC->isSignBit()) {
      // If wrapping is not allowed, then the addition must set the sign bit:
      // X + (signbit) --> X | signbit
      if (I.hasNoSignedWrap() || I.hasNoUnsignedWrap())
        return BinaryOperator::CreateOr(LHS, RHS);

      // If wrapping is allowed, then the addition flips the sign bit of LHS:
      // X + (signbit) --> X ^ signbit
      return BinaryOperator::CreateXor(LHS, RHS);
    }
    // Is this add the last step in a convoluted sext?
    Value *X;
    const APInt *C;
    if (match(LHS, m_ZExt(m_Xor(m_Value(X), m_APInt(C)))) &&
        C->isMinSignedValue() &&
        C->sext(LHS->getType()->getScalarSizeInBits()) == *RHSC) {
      // add(zext(xor i16 X, -32768), -32768) --> sext X
      return CastInst::Create(Instruction::SExt, X, LHS->getType());
    }

    if (RHSC->isNegative() &&
        match(LHS, m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C)))) &&
        RHSC->sge(-C->sext(RHSC->getBitWidth()))) {
      // (add (zext (add nuw X, C)), Val) -> (zext (add nuw X, C+Val))
      Constant *NewC =
          ConstantInt::get(X->getType(), *C + RHSC->trunc(C->getBitWidth()));
      return new ZExtInst(Builder->CreateNUWAdd(X, NewC), I.getType());
    }
  }
  // FIXME: Use the match above instead of dyn_cast to allow these transforms
  // for splat vectors.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
    // zext(bool) + C -> bool ? C + 1 : C
    if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
      if (ZI->getSrcTy()->isIntegerTy(1))
        return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);
    Value *XorLHS = nullptr; ConstantInt *XorRHS = nullptr;
    if (match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
      uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
      const APInt &RHSVal = CI->getValue();
      unsigned ExtendAmt = 0;
      // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
      // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
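      // E.g. (illustrative, i32): ((X & 0xFF) ^ 0x80) + 0xFFFFFF80
      // sign-extends the low byte of X; the shl/ashr pair emitted below
      // performs exactly that extension.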
      if (XorRHS->getValue() == -RHSVal) {
        if (RHSVal.isPowerOf2())
          ExtendAmt = TySizeBits - RHSVal.logBase2() - 1;
        else if (XorRHS->getValue().isPowerOf2())
          ExtendAmt = TySizeBits - XorRHS->getValue().logBase2() - 1;
      }

      if (ExtendAmt) {
        APInt Mask = APInt::getHighBitsSet(TySizeBits, ExtendAmt);
        if (!MaskedValueIsZero(XorLHS, Mask, 0, &I))
          ExtendAmt = 0;
      }

      if (ExtendAmt) {
        Constant *ShAmt = ConstantInt::get(I.getType(), ExtendAmt);
        Value *NewShl = Builder->CreateShl(XorLHS, ShAmt, "sext");
        return BinaryOperator::CreateAShr(NewShl, ShAmt);
      }
      // If this is a xor that was canonicalized from a sub, turn it back into
      // a sub and fuse this add with it.
      if (LHS->hasOneUse() && (XorRHS->getValue()+1).isPowerOf2()) {
        IntegerType *IT = cast<IntegerType>(I.getType());
        APInt LHSKnownOne(IT->getBitWidth(), 0);
        APInt LHSKnownZero(IT->getBitWidth(), 0);
        computeKnownBits(XorLHS, LHSKnownZero, LHSKnownOne, 0, &I);
        if ((XorRHS->getValue() | LHSKnownZero).isAllOnesValue())
          return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
                                           XorLHS);
      }

      // (X + signbit) + C could have gotten canonicalized to (X ^ signbit) + C;
      // transform them into (X + (signbit ^ C)).
      if (XorRHS->getValue().isSignBit())
        return BinaryOperator::CreateAdd(XorLHS,
                                         ConstantExpr::getXor(XorRHS, CI));
    }
  }
  if (isa<Constant>(RHS))
    if (Instruction *NV = foldOpWithConstantIntoOperand(I))
      return NV;

  if (I.getType()->getScalarType()->isIntegerTy(1))
    return BinaryOperator::CreateXor(LHS, RHS);
  // X + X --> X << 1
  if (LHS == RHS) {
    BinaryOperator *New =
      BinaryOperator::CreateShl(LHS, ConstantInt::get(I.getType(), 1));
    New->setHasNoSignedWrap(I.hasNoSignedWrap());
    New->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return New;
  }
  // -A + -B --> -(A + B)
  if (Value *LHSV = dyn_castNegVal(LHS)) {
    if (!isa<Constant>(RHS))
      if (Value *RHSV = dyn_castNegVal(RHS)) {
        Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
        return BinaryOperator::CreateNeg(NewAdd);
      }

    // -A + B --> B - A
    return BinaryOperator::CreateSub(RHS, LHSV);
  }

  // A + -B --> A - B
  if (!isa<Constant>(RHS))
    if (Value *V = dyn_castNegVal(RHS))
      return BinaryOperator::CreateSub(LHS, V);
  if (Value *V = checkForNegativeOperand(I, Builder))
    return replaceInstUsesWith(I, V);

  // A+B --> A|B iff A and B have no bits set in common.
  if (haveNoCommonBitsSet(LHS, RHS, DL, &AC, &I, &DT))
    return BinaryOperator::CreateOr(LHS, RHS);
  if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
    Value *X;
    if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
      return BinaryOperator::CreateSub(SubOne(CRHS), X);
  }
  // FIXME: We already did a check for ConstantInt RHS above this.
  // FIXME: Is this pattern covered by another fold? No regression tests fail on
  // removal.
  if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
    // (X & FF00) + xx00 -> (X+xx00) & FF00
    Value *X;
    ConstantInt *C2;
    if (LHS->hasOneUse() &&
        match(LHS, m_And(m_Value(X), m_ConstantInt(C2))) &&
        CRHS->getValue() == (CRHS->getValue() & C2->getValue())) {
      // See if all bits from the first bit set in the Add RHS up are included
      // in the mask. First, get the rightmost bit.
      const APInt &AddRHSV = CRHS->getValue();

      // Form a mask of all bits from the lowest bit added through the top.
      APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));

      // See if the and mask includes all of these bits.
      APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());

      if (AddRHSHighBits == AddRHSHighBitsAnd) {
        // Okay, the xform is safe. Insert the new add pronto.
        Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
        return BinaryOperator::CreateAnd(NewAdd, C2);
      }
    }
  }
  // add (select X 0 (sub n A)) A  -->  select X A n
  {
    SelectInst *SI = dyn_cast<SelectInst>(LHS);
    Value *A = RHS;
    if (!SI) {
      SI = dyn_cast<SelectInst>(RHS);
      A = LHS;
    }

    if (SI && SI->hasOneUse()) {
      Value *TV = SI->getTrueValue();
      Value *FV = SI->getFalseValue();
      Value *N;

      // Can we fold the add into the argument of the select?
      // We check both true and false select arguments for a matching subtract.
      if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the true select value.
        return SelectInst::Create(SI->getCondition(), N, A);

      if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the false select value.
        return SelectInst::Create(SI->getCondition(), A, N);
    }
  }
  // Check for (add (sext x), y), see if we can merge this into an
  // integer add followed by a sext.
  if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) {
    // (add (sext x), cst) --> (sext (add x, cst'))
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
      if (LHSConv->hasOneUse()) {
        Constant *CI =
            ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
        if (ConstantExpr::getSExt(CI, I.getType()) == RHSC &&
            WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI, I)) {
          // Insert the new, smaller add.
          Value *NewAdd =
              Builder->CreateNSWAdd(LHSConv->getOperand(0), CI, "addconv");
          return new SExtInst(NewAdd, I.getType());
        }
      }
    }

    // (add (sext x), (sext y)) --> (sext (add int x, y))
    if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of sexts), and if the
      // integer add will not overflow.
      if (LHSConv->getOperand(0)->getType() ==
              RHSConv->getOperand(0)->getType() &&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0),
                                   RHSConv->getOperand(0), I)) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
                                              RHSConv->getOperand(0), "addconv");
        return new SExtInst(NewAdd, I.getType());
      }
    }
  }
  // Check for (add (zext x), y), see if we can merge this into an
  // integer add followed by a zext.
  if (auto *LHSConv = dyn_cast<ZExtInst>(LHS)) {
    // (add (zext x), cst) --> (zext (add x, cst'))
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
      if (LHSConv->hasOneUse()) {
        Constant *CI =
            ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
        if (ConstantExpr::getZExt(CI, I.getType()) == RHSC &&
            computeOverflowForUnsignedAdd(LHSConv->getOperand(0), CI, &I) ==
                OverflowResult::NeverOverflows) {
          // Insert the new, smaller add.
          Value *NewAdd =
              Builder->CreateNUWAdd(LHSConv->getOperand(0), CI, "addconv");
          return new ZExtInst(NewAdd, I.getType());
        }
      }
    }

    // (add (zext x), (zext y)) --> (zext (add int x, y))
    if (auto *RHSConv = dyn_cast<ZExtInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of zexts), and if the
      // integer add will not overflow.
      if (LHSConv->getOperand(0)->getType() ==
              RHSConv->getOperand(0)->getType() &&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          computeOverflowForUnsignedAdd(LHSConv->getOperand(0),
                                        RHSConv->getOperand(0),
                                        &I) == OverflowResult::NeverOverflows) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNUWAdd(
            LHSConv->getOperand(0), RHSConv->getOperand(0), "addconv");
        return new ZExtInst(NewAdd, I.getType());
      }
    }
  }
  // (add (xor A, B) (and A, B)) --> (or A, B)
  {
    Value *A = nullptr, *B = nullptr;
    if (match(RHS, m_Xor(m_Value(A), m_Value(B))) &&
        match(LHS, m_c_And(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateOr(A, B);

    if (match(LHS, m_Xor(m_Value(A), m_Value(B))) &&
        match(RHS, m_c_And(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateOr(A, B);
  }
  // (add (or A, B) (and A, B)) --> (add A, B)
  {
    Value *A = nullptr, *B = nullptr;
    if (match(RHS, m_Or(m_Value(A), m_Value(B))) &&
        match(LHS, m_c_And(m_Specific(A), m_Specific(B)))) {
      auto *New = BinaryOperator::CreateAdd(A, B);
      New->setHasNoSignedWrap(I.hasNoSignedWrap());
      New->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
      return New;
    }

    if (match(LHS, m_Or(m_Value(A), m_Value(B))) &&
        match(RHS, m_c_And(m_Specific(A), m_Specific(B)))) {
      auto *New = BinaryOperator::CreateAdd(A, B);
      New->setHasNoSignedWrap(I.hasNoSignedWrap());
      New->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
      return New;
    }
  }
  // TODO(jingyue): Consider using WillNotOverflowSignedAdd and
  // WillNotOverflowUnsignedAdd to reduce the number of invocations of
  // computeKnownBits.
  if (!I.hasNoSignedWrap() && WillNotOverflowSignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() &&
      computeOverflowForUnsignedAdd(LHS, RHS, &I) ==
          OverflowResult::NeverOverflows) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}
Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V =
          SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), DL, &TLI, &DT, &AC))
    return replaceInstUsesWith(I, V);
  if (isa<Constant>(RHS))
    if (Instruction *FoldedFAdd = foldOpWithConstantIntoOperand(I))
      return FoldedFAdd;

  // -A + -B --> -(A + B)
  if (Value *LHSV = dyn_castFNegVal(LHS)) {
    Instruction *RI = BinaryOperator::CreateFSub(RHS, LHSV);
    RI->copyFastMathFlags(&I);
    return RI;
  }

  // A + -B --> A - B
  if (!isa<Constant>(RHS))
    if (Value *V = dyn_castFNegVal(RHS)) {
      Instruction *RI = BinaryOperator::CreateFSub(LHS, V);
      RI->copyFastMathFlags(&I);
      return RI;
    }
  // Check for (fadd double (sitofp x), y), see if we can merge this into an
  // integer add followed by a promotion.
  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
    Value *LHSIntVal = LHSConv->getOperand(0);

    // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
    // ... if the constant fits in the integer value. This is useful for things
    // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
    // requires a constant pool load, and generally allows the add to be better
    // instcombined.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
      Constant *CI =
        ConstantExpr::getFPToSI(CFP, LHSIntVal->getType());
      if (LHSConv->hasOneUse() &&
          ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
          WillNotOverflowSignedAdd(LHSIntVal, CI, I)) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSIntVal,
                                              CI, "addconv");
        return new SIToFPInst(NewAdd, I.getType());
      }
    }
    // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
    if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
      Value *RHSIntVal = RHSConv->getOperand(0);

      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of int->fp conversions),
      // and if the integer add will not overflow.
      if (LHSIntVal->getType() == RHSIntVal->getType() &&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          WillNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSIntVal,
                                              RHSIntVal, "addconv");
        return new SIToFPInst(NewAdd, I.getType());
      }
    }
  }
  // select C, 0, B + select C, A, 0 -> select C, A, B
  {
    Value *A1, *B1, *C1, *A2, *B2, *C2;
    if (match(LHS, m_Select(m_Value(C1), m_Value(A1), m_Value(B1))) &&
        match(RHS, m_Select(m_Value(C2), m_Value(A2), m_Value(B2)))) {
      if (C1 == C2) {
        Constant *Z1 = nullptr, *Z2 = nullptr;
        Value *A, *B, *C = C1;
        if (match(A1, m_AnyZero()) && match(B2, m_AnyZero())) {
          Z1 = dyn_cast<Constant>(A1); A = A2;
          Z2 = dyn_cast<Constant>(B2); B = B1;
        } else if (match(B1, m_AnyZero()) && match(A2, m_AnyZero())) {
          Z1 = dyn_cast<Constant>(B1); B = B2;
          Z2 = dyn_cast<Constant>(A2); A = A1;
        }

        if (Z1 && Z2 &&
            (I.hasNoSignedZeros() ||
             (Z1->isNegativeZeroValue() && Z2->isNegativeZeroValue()))) {
          return SelectInst::Create(C, A, B);
        }
      }
    }
  }
  if (I.hasUnsafeAlgebra()) {
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return Changed ? &I : nullptr;
}
/// Optimize pointer differences of two pointers into the same array into a
/// size. Consider:
///   &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
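///
/// For example (illustrative IR):
///   %p = getelementptr i8, i8* %A, i64 10
///   %l = ptrtoint i8* %p to i64
///   %r = ptrtoint i8* %A to i64
///   %d = sub i64 %l, %r
/// folds to the constant 10.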
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
                                               Type *Ty) {
  // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
  // this.
  bool Swapped = false;
  GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;

  // For now we require one side to be the base pointer "A" or a constant
  // GEP derived from it.
  if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
    // (gep X, ...) - X
    if (LHSGEP->getOperand(0) == RHS) {
      GEP1 = LHSGEP;
      Swapped = false;
    } else if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
      // (gep X, ...) - (gep X, ...)
      if (LHSGEP->getOperand(0)->stripPointerCasts() ==
          RHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = RHSGEP;
        GEP1 = LHSGEP;
        Swapped = false;
      }
    }
  }
  if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
    // X - (gep X, ...)
    if (RHSGEP->getOperand(0) == LHS) {
      GEP1 = RHSGEP;
      Swapped = true;
    } else if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
      // (gep X, ...) - (gep X, ...)
      if (RHSGEP->getOperand(0)->stripPointerCasts() ==
          LHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = LHSGEP;
        GEP1 = RHSGEP;
        Swapped = true;
      }
    }
  }
  // Avoid duplicating the arithmetic if GEP2 has non-constant indices and
  // multiple users.
  if (!GEP1 ||
      (GEP2 && !GEP2->hasAllConstantIndices() && !GEP2->hasOneUse()))
    return nullptr;

  // Emit the offset of the GEP as an intptr_t.
  Value *Result = EmitGEPOffset(GEP1);

  // If we had a constant expression GEP on the other side offsetting the
  // pointer, subtract it from the offset we have.
  if (GEP2) {
    Value *Offset = EmitGEPOffset(GEP2);
    Result = Builder->CreateSub(Result, Offset);
  }

  // If we have p - gep(p, ...) then we have to negate the result.
  if (Swapped)
    Result = Builder->CreateNeg(Result, "diff.neg");

  return Builder->CreateIntCast(Result, Ty, true);
}
Instruction *InstCombiner::visitSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),
                                 I.hasNoUnsignedWrap(), DL, &TLI, &DT, &AC))
    return replaceInstUsesWith(I, V);

  // (A*B)-(A*C) -> A*(B-C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);
  // If this is a 'B = x-(-A)', change to B = x+A.
  if (Value *V = dyn_castNegVal(Op1)) {
    BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);

    if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
      assert(BO->getOpcode() == Instruction::Sub &&
             "Expected a subtraction operator!");
      if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    } else {
      if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    }

    return Res;
  }
  if (I.getType()->getScalarType()->isIntegerTy(1))
    return BinaryOperator::CreateXor(Op0, Op1);

  // Replace (-1 - A) with (~A).
  if (match(Op0, m_AllOnes()))
    return BinaryOperator::CreateNot(Op1);
  if (Constant *C = dyn_cast<Constant>(Op0)) {
    // C - ~X == X + (1+C)
    Value *X = nullptr;
    if (match(Op1, m_Not(m_Value(X))))
      return BinaryOperator::CreateAdd(X, AddOne(C));

    // Try to fold constant sub into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

    // Try to fold constant sub into PHI values.
    if (PHINode *PN = dyn_cast<PHINode>(Op1))
      if (Instruction *R = foldOpIntoPhi(I, PN))
        return R;

    // C-(X+C2) --> (C-C2)-X
    Constant *C2;
    if (match(Op1, m_Add(m_Value(X), m_Constant(C2))))
      return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);

    // Fold (sub 0, (zext bool to B)) --> (sext bool to B)
    if (C->isNullValue() && match(Op1, m_ZExt(m_Value(X))))
      if (X->getType()->getScalarType()->isIntegerTy(1))
        return CastInst::CreateSExtOrBitCast(X, Op1->getType());

    // Fold (sub 0, (sext bool to B)) --> (zext bool to B)
    if (C->isNullValue() && match(Op1, m_SExt(m_Value(X))))
      if (X->getType()->getScalarType()->isIntegerTy(1))
        return CastInst::CreateZExtOrBitCast(X, Op1->getType());
  }
  const APInt *Op0C;
  if (match(Op0, m_APInt(Op0C))) {
    unsigned BitWidth = I.getType()->getScalarSizeInBits();

    // -(X >>u 31) -> (X >>s 31)
    // -(X >>s 31) -> (X >>u 31)
    if (*Op0C == 0) {
      Value *X;
      const APInt *ShAmt;
      if (match(Op1, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
          *ShAmt == BitWidth - 1) {
        Value *ShAmtOp = cast<Instruction>(Op1)->getOperand(1);
        return BinaryOperator::CreateAShr(X, ShAmtOp);
      }
      if (match(Op1, m_AShr(m_Value(X), m_APInt(ShAmt))) &&
          *ShAmt == BitWidth - 1) {
        Value *ShAmtOp = cast<Instruction>(Op1)->getOperand(1);
        return BinaryOperator::CreateLShr(X, ShAmtOp);
      }
    }
    // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
    // zero.
    if (Op0C->isMask()) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      computeKnownBits(Op1, RHSKnownZero, RHSKnownOne, 0, &I);
      if ((*Op0C | RHSKnownZero).isAllOnesValue())
        return BinaryOperator::CreateXor(Op1, Op0);
    }
  }
  {
    Value *Y;
    // X-(X+Y) == -Y    X-(Y+X) == -Y
    if (match(Op1, m_c_Add(m_Specific(Op0), m_Value(Y))))
      return BinaryOperator::CreateNeg(Y);

    // (X-Y)-X == -Y
    if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
      return BinaryOperator::CreateNeg(Y);
  }
  // (sub (or A, B) (xor A, B)) --> (and A, B)
  {
    Value *A, *B;
    if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);
  }

  {
    Value *Y;
    // ((X | Y) - X) --> (~X & Y)
    if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
      return BinaryOperator::CreateAnd(
          Y, Builder->CreateNot(Op1, Op1->getName() + ".not"));
  }
  if (Op1->hasOneUse()) {
    Value *X = nullptr, *Y = nullptr, *Z = nullptr;
    Constant *C = nullptr;

    // (X - (Y - Z))  -->  (X + (Z - Y)).
    if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
      return BinaryOperator::CreateAdd(Op0,
                                      Builder->CreateSub(Z, Y, Op1->getName()));

    // (X - (X & Y))  -->  (X & ~Y)
    if (match(Op1, m_c_And(m_Value(Y), m_Specific(Op0))))
      return BinaryOperator::CreateAnd(Op0,
                                 Builder->CreateNot(Y, Y->getName() + ".not"));

    // 0 - (X sdiv C)  -> (X sdiv -C)  provided the negation doesn't overflow.
    if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) && match(Op0, m_Zero()) &&
        C->isNotMinSignedValue() && !C->isOneValue())
      return BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C));

    // 0 - (X << Y)  -> (-X << Y)  when X is freely negatable.
    if (match(Op1, m_Shl(m_Value(X), m_Value(Y))) && match(Op0, m_Zero()))
      if (Value *XNeg = dyn_castNegVal(X))
        return BinaryOperator::CreateShl(XNeg, Y);
    // Subtracting -1/0 is the same as adding 1/0:
    // sub [nsw] Op0, sext(bool Y) -> add [nsw] Op0, zext(bool Y)
    // 'nuw' is dropped in favor of the canonical form.
    if (match(Op1, m_SExt(m_Value(Y))) &&
        Y->getType()->getScalarSizeInBits() == 1) {
      Value *Zext = Builder->CreateZExt(Y, I.getType());
      BinaryOperator *Add = BinaryOperator::CreateAdd(Op0, Zext);
      Add->setHasNoSignedWrap(I.hasNoSignedWrap());
      return Add;
    }
    // X - A*-B -> X + A*B
    // X - -A*B -> X + A*B
    Value *A, *B;
    if (match(Op1, m_c_Mul(m_Value(A), m_Neg(m_Value(B)))))
      return BinaryOperator::CreateAdd(Op0, Builder->CreateMul(A, B));

    // X - A*CI -> X + A*-CI
    // No need to handle commuted multiply because multiply handling will
    // ensure the constant will be moved to the right-hand side.
    Constant *CI;
    if (match(Op1, m_Mul(m_Value(A), m_Constant(CI)))) {
      Value *NewMul = Builder->CreateMul(A, ConstantExpr::getNeg(CI));
      return BinaryOperator::CreateAdd(Op0, NewMul);
    }
  }
  // Optimize pointer differences of pointers into the same array into a size.
  // Consider:
  //   &A[10] - &A[0]: we should compile this to "10".
  Value *LHSOp, *RHSOp;
  if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
      match(Op1, m_PtrToInt(m_Value(RHSOp))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
      return replaceInstUsesWith(I, Res);

  // trunc(p)-trunc(q) -> trunc(p-q)
  if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
      match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
      return replaceInstUsesWith(I, Res);
  bool Changed = false;
  if (!I.hasNoSignedWrap() && WillNotOverflowSignedSub(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() && WillNotOverflowUnsignedSub(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}
Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V =
          SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), DL, &TLI, &DT, &AC))
    return replaceInstUsesWith(I, V);
  // fsub nsz 0, X ==> fsub nsz -0.0, X
  if (I.getFastMathFlags().noSignedZeros() && match(Op0, m_Zero())) {
    // Subtraction from -0.0 is the canonical form of fneg.
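    // (Without nsz this rewrite would be wrong for X == +0.0:
    // "0.0 - 0.0" is +0.0, while "-0.0 - 0.0" is -0.0, matching fneg.)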
    Instruction *NewI = BinaryOperator::CreateFNeg(Op1);
    NewI->copyFastMathFlags(&I);
    return NewI;
  }
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *NV = FoldOpIntoSelect(I, SI))
        return NV;
  // If this is a 'B = x-(-A)', change to B = x+A, potentially looking
  // through FP extensions/truncations along the way.
  if (Value *V = dyn_castFNegVal(Op1)) {
    Instruction *NewI = BinaryOperator::CreateFAdd(Op0, V);
    NewI->copyFastMathFlags(&I);
    return NewI;
  }
  if (FPTruncInst *FPTI = dyn_cast<FPTruncInst>(Op1)) {
    if (Value *V = dyn_castFNegVal(FPTI->getOperand(0))) {
      Value *NewTrunc = Builder->CreateFPTrunc(V, I.getType());
      Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewTrunc);
      NewI->copyFastMathFlags(&I);
      return NewI;
    }
  } else if (FPExtInst *FPEI = dyn_cast<FPExtInst>(Op1)) {
    if (Value *V = dyn_castFNegVal(FPEI->getOperand(0))) {
      Value *NewExt = Builder->CreateFPExt(V, I.getType());
      Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewExt);
      NewI->copyFastMathFlags(&I);
      return NewI;
    }
  }
  if (I.hasUnsafeAlgebra()) {
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }