//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"
/// Similar to getICmpCode but for FCmpInst. This encodes a fcmp predicate into
/// a four bit mask.
static unsigned getFCmpCode(FCmpInst::Predicate CC) {
  assert(FCmpInst::FCMP_FALSE <= CC && CC <= FCmpInst::FCMP_TRUE &&
         "Unexpected FCmp predicate!");
  // Take advantage of the bit pattern of FCmpInst::Predicate here.
  //                                                  U L G E
  static_assert(FCmpInst::FCMP_FALSE ==  0, "");  // 0 0 0 0
  static_assert(FCmpInst::FCMP_OEQ   ==  1, "");  // 0 0 0 1
  static_assert(FCmpInst::FCMP_OGT   ==  2, "");  // 0 0 1 0
  static_assert(FCmpInst::FCMP_OGE   ==  3, "");  // 0 0 1 1
  static_assert(FCmpInst::FCMP_OLT   ==  4, "");  // 0 1 0 0
  static_assert(FCmpInst::FCMP_OLE   ==  5, "");  // 0 1 0 1
  static_assert(FCmpInst::FCMP_ONE   ==  6, "");  // 0 1 1 0
  static_assert(FCmpInst::FCMP_ORD   ==  7, "");  // 0 1 1 1
  static_assert(FCmpInst::FCMP_UNO   ==  8, "");  // 1 0 0 0
  static_assert(FCmpInst::FCMP_UEQ   ==  9, "");  // 1 0 0 1
  static_assert(FCmpInst::FCMP_UGT   == 10, "");  // 1 0 1 0
  static_assert(FCmpInst::FCMP_UGE   == 11, "");  // 1 0 1 1
  static_assert(FCmpInst::FCMP_ULT   == 12, "");  // 1 1 0 0
  static_assert(FCmpInst::FCMP_ULE   == 13, "");  // 1 1 0 1
  static_assert(FCmpInst::FCMP_UNE   == 14, "");  // 1 1 1 0
  static_assert(FCmpInst::FCMP_TRUE  == 15, "");  // 1 1 1 1
  return CC;
}
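// A worked instance of the encoding above (illustrative, not from the
// original comments): OLT is 0100 and OGT is 0010, so OLT | OGT == 0110,
// which is exactly ONE (ordered and unequal); likewise UNO | OEQ == 1001,
// which is UEQ. This is what lets foldLogicOfFCmps below combine predicates
// with plain bitwise AND/OR on these codes.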
/// This is the complement of getICmpCode, which turns an opcode and two
/// operands into either a constant true or false, or a brand new ICmp
/// instruction. The sign is passed in to determine which kind of predicate to
/// use in the new icmp instruction.
static Value *getNewICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
                              InstCombiner::BuilderTy &Builder) {
  ICmpInst::Predicate NewPred;
  if (Value *NewConstant = getICmpValue(Sign, Code, LHS, RHS, NewPred))
    return NewConstant;
  return Builder.CreateICmp(NewPred, LHS, RHS);
}
/// This is the complement of getFCmpCode, which turns an opcode and two
/// operands into either a FCmp instruction, or a true/false constant.
static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy &Builder) {
  const auto Pred = static_cast<FCmpInst::Predicate>(Code);
  assert(FCmpInst::FCMP_FALSE <= Pred && Pred <= FCmpInst::FCMP_TRUE &&
         "Unexpected FCmp predicate!");
  if (Pred == FCmpInst::FCMP_FALSE)
    return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
  if (Pred == FCmpInst::FCMP_TRUE)
    return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
  return Builder.CreateFCmp(Pred, LHS, RHS);
}
/// \brief Transform BITWISE_OP(BSWAP(A), BSWAP(B)) or
/// BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A, B)).
/// \param I Binary operator to transform.
/// \return Pointer to node that must replace the original binary operator, or
///         null pointer if no transformation was made.
static Value *SimplifyBSwap(BinaryOperator &I,
                            InstCombiner::BuilderTy &Builder) {
  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying");

  Value *OldLHS = I.getOperand(0);
  Value *OldRHS = I.getOperand(1);

  Value *NewLHS;
  if (!match(OldLHS, m_BSwap(m_Value(NewLHS))))
    return nullptr;

  Value *NewRHS;
  const APInt *C;

  if (match(OldRHS, m_BSwap(m_Value(NewRHS)))) {
    // OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
    if (!OldLHS->hasOneUse() && !OldRHS->hasOneUse())
      return nullptr;
    // NewRHS initialized by the matcher.
  } else if (match(OldRHS, m_APInt(C))) {
    // OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
    if (!OldLHS->hasOneUse())
      return nullptr;
    NewRHS = ConstantInt::get(I.getType(), C->byteSwap());
  } else
    return nullptr;

  Value *BinOp = Builder.CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
  Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap,
                                          I.getType());
  return Builder.CreateCall(F, BinOp);
}
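// Illustrative IR for the constant case above (a sketch with made-up values):
//   %swap = call i32 @llvm.bswap.i32(i32 %x)
//   %r    = and i32 %swap, 65280          ; 0x0000FF00
// becomes
//   %m = and i32 %x, 16711680             ; 0x00FF0000 == bswap(0x0000FF00)
//   %r = call i32 @llvm.bswap.i32(i32 %m)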
/// This handles expressions of the form ((val OP C1) & C2). Where
/// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'.
Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
                                    ConstantInt *OpRHS,
                                    ConstantInt *AndRHS,
                                    BinaryOperator &TheAnd) {
  Value *X = Op->getOperand(0);

  switch (Op->getOpcode()) {
  default: break;
  case Instruction::Add:
    if (Op->hasOneUse()) {
      // Adding a one to a single bit bit-field should be turned into an XOR
      // of the bit. First thing to check is to see if this AND is with a
      // single bit constant.
      const APInt &AndRHSV = AndRHS->getValue();

      // If there is only one bit set.
      if (AndRHSV.isPowerOf2()) {
        // Ok, at this point, we know that we are masking the result of the
        // ADD down to exactly one bit. If the constant we are adding has
        // no bits set below this bit, then we can eliminate the ADD.
        const APInt& AddRHS = OpRHS->getValue();

        // Check to see if any bits below the one bit set in AndRHSV are set.
        if ((AddRHS & (AndRHSV - 1)).isNullValue()) {
          // If not, the only thing that can affect the output of the AND is
          // the bit specified by AndRHSV. If that bit is set, the effect of
          // the XOR is to toggle the bit. If it is clear, then the ADD has
          // no effect.
          if ((AddRHS & AndRHSV).isNullValue()) { // Bit is not set, noop
            TheAnd.setOperand(0, X);
            return &TheAnd;
          } else {
            // Pull the XOR out of the AND.
            Value *NewAnd = Builder.CreateAnd(X, AndRHS);
            NewAnd->takeName(Op);
            return BinaryOperator::CreateXor(NewAnd, AndRHS);
          }
        }
      }
    }
    break;
  }
  return nullptr;
}
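// Worked example for the Add case above (illustrative): with OpRHS == 4 and
// AndRHS == 4, no bit of the addend lies below the mask bit, so
//   ((X + 4) & 4) --> ((X & 4) ^ 4)
// because adding 4 simply toggles bit 2 when no carry can reach it.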
/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi). This method expects that Lo <= Hi. isSigned indicates
/// whether to treat V, Lo, and Hi as signed or not.
Value *InstCombiner::insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                                     bool isSigned, bool Inside) {
  assert((isSigned ? Lo.sle(Hi) : Lo.ule(Hi)) &&
         "Lo is not <= Hi in range emission code!");

  Type *Ty = V->getType();
  if (Lo == Hi)
    return Inside ? ConstantInt::getFalse(Ty) : ConstantInt::getTrue(Ty);

  // V >= Min && V <  Hi --> V <  Hi
  // V <  Min || V >= Hi --> V >= Hi
  ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
  if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
    Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
    return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
  }

  // V >= Lo && V <  Hi --> V - Lo u<  Hi - Lo
  // V <  Lo || V >= Hi --> V - Lo u>= Hi - Lo
  Value *VMinusLo =
      Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
  Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
  return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
}
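// For example (illustrative): Lo == 5, Hi == 10, Inside == true emits
//   %off = sub i32 %V, 5
//   %cmp = icmp ult i32 %off, 5    ; true iff 5 <= V < 10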
/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
/// that can be simplified.
/// One of A and B is considered the mask. The other is the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum contains
/// only "Mask", then both A and B can be considered masks. If A is the mask,
/// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
/// If both A and C are constants, this proof is also easy.
/// For the following explanations, we assume that A is the mask.
///
/// "AllOnes" declares that the comparison is true only if (A & B) == A or all
/// bits of A are set in B.
///   Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
///
/// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all
/// bits of A are cleared in B.
///   Example: (icmp eq (A & 3), 0) -> Mask_AllZeros
///
/// "Mixed" declares that (A & B) == C and C might or might not contain any
/// number of one bits and zero bits.
///   Example: (icmp eq (A & 3), 1) -> AMask_Mixed
///
/// "Not" means that in above descriptions "==" should be replaced by "!=".
///   Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
///
/// If the mask A contains a single bit, then the following is equivalent:
///   (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
///   (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
enum MaskedICmpType {
  AMask_AllOnes    =   1,
  AMask_NotAllOnes =   2,
  BMask_AllOnes    =   4,
  BMask_NotAllOnes =   8,
  Mask_AllZeros    =  16,
  Mask_NotAllZeros =  32,
  AMask_Mixed      =  64,
  AMask_NotMixed   = 128,
  BMask_Mixed      = 256,
  BMask_NotMixed   = 512
};
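// Single-bit illustration of the equivalence stated above (not from the
// original file): with mask A == 8, (icmp ne (X & 8), 0) and
// (icmp eq (X & 8), 8) test exactly the same bit, which is why each "Not"
// flag sits directly next to its positive counterpart in this enum.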
/// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
/// satisfies.
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
                                  ICmpInst::Predicate Pred) {
  ConstantInt *ACst = dyn_cast<ConstantInt>(A);
  ConstantInt *BCst = dyn_cast<ConstantInt>(B);
  ConstantInt *CCst = dyn_cast<ConstantInt>(C);
  bool IsEq = (Pred == ICmpInst::ICMP_EQ);
  bool IsAPow2 = (ACst && !ACst->isZero() && ACst->getValue().isPowerOf2());
  bool IsBPow2 = (BCst && !BCst->isZero() && BCst->getValue().isPowerOf2());
  unsigned MaskVal = 0;
  if (CCst && CCst->isZero()) {
    // If C is zero, then both A and B qualify as mask.
    MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
                     : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
                       : (AMask_AllOnes | AMask_Mixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
                       : (BMask_AllOnes | BMask_Mixed));
    return MaskVal;
  }

  if (A == C) {
    MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
                     : (AMask_NotAllOnes | AMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
                       : (Mask_AllZeros | AMask_Mixed));
  } else if (ACst && CCst && ConstantExpr::getAnd(ACst, CCst) == CCst) {
    MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
  }

  if (B == C) {
    MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
                     : (BMask_NotAllOnes | BMask_NotMixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
                       : (Mask_AllZeros | BMask_Mixed));
  } else if (BCst && CCst && ConstantExpr::getAnd(BCst, CCst) == CCst) {
    MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
  }

  return MaskVal;
}
/// Convert an analysis of a masked ICmp into its equivalent if all boolean
/// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
/// is adjacent to the corresponding normal flag (recording ==), this just
/// involves swapping those bits over.
static unsigned conjugateICmpMask(unsigned Mask) {
  unsigned NewMask;
  NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
                     AMask_Mixed | BMask_Mixed))
            << 1;

  NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
                      AMask_NotMixed | BMask_NotMixed))
             >> 1;

  return NewMask;
}
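// For instance (illustrative): conjugateICmpMask(AMask_AllOnes | BMask_Mixed)
// yields AMask_NotAllOnes | BMask_NotMixed, i.e. 1 | 256 becomes 2 | 512.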
// Adapts the external decomposeBitTestICmp for local use.
static bool decomposeBitTestICmp(Value *LHS, Value *RHS,
                                 CmpInst::Predicate &Pred, Value *&X,
                                 Value *&Y, Value *&Z) {
  APInt Mask;
  if (!llvm::decomposeBitTestICmp(LHS, RHS, Pred, X, Mask))
    return false;

  Y = ConstantInt::get(X->getType(), Mask);
  Z = ConstantInt::get(X->getType(), 0);
  return true;
}
/// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
/// Return the set of pattern classes (from MaskedICmpType) that both LHS and
/// RHS satisfy.
static unsigned getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C,
                                         Value *&D, Value *&E, ICmpInst *LHS,
                                         ICmpInst *RHS,
                                         ICmpInst::Predicate &PredL,
                                         ICmpInst::Predicate &PredR) {
  // Vectors are not (yet?) supported. Don't support pointers either.
  if (!LHS->getOperand(0)->getType()->isIntegerTy() ||
      !RHS->getOperand(0)->getType()->isIntegerTy())
    return 0;

  // Here comes the tricky part:
  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
  // and L11 & L12 == L21 & L22. The same goes for RHS.
  // Now we must find those components L** and R** that are equal, so
  // that we can extract the parameters A, B, C, D, and E for the canonical
  // pattern above.
  Value *L1 = LHS->getOperand(0);
  Value *L2 = LHS->getOperand(1);
  Value *L11, *L12, *L21, *L22;
  // Check whether the icmp can be decomposed into a bit test.
  if (decomposeBitTestICmp(L1, L2, PredL, L11, L12, L2)) {
    L21 = L22 = L1 = nullptr;
  } else {
    // Look for ANDs in the LHS icmp.
    if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
      // Any icmp can be viewed as being trivially masked; if it allows us to
      // remove one, it's worth it.
      L11 = L1;
      L12 = Constant::getAllOnesValue(L1->getType());
    }

    if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
      L21 = L2;
      L22 = Constant::getAllOnesValue(L2->getType());
    }
  }

  // Bail if LHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredL))
    return 0;

  Value *R1 = RHS->getOperand(0);
  Value *R2 = RHS->getOperand(1);
  Value *R11, *R12;
  bool Ok = false;
  if (decomposeBitTestICmp(R1, R2, PredR, R11, R12, R2)) {
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
    } else {
      return 0;
    }
    E = R2;
    R1 = nullptr;
    Ok = true;
  } else {
    if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
      // As before, model no mask as a trivial mask if it'll let us do an
      // optimization.
      R11 = R1;
      R12 = Constant::getAllOnesValue(R1->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R2;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R2;
      Ok = true;
    }
  }

  // Bail if RHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredR))
    return 0;

  // Look for ANDs on the right side of the RHS icmp.
  if (!Ok) {
    if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
      R11 = R2;
      R12 = Constant::getAllOnesValue(R2->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R1;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R1;
      Ok = true;
    } else {
      return 0;
    }
  }
  if (!Ok)
    return 0;

  if (L11 == A) {
    B = L12;
    C = L2;
  } else if (L12 == A) {
    B = L11;
    C = L2;
  } else if (L21 == A) {
    B = L22;
    C = L1;
  } else if (L22 == A) {
    B = L21;
    C = L1;
  }

  unsigned LeftType = getMaskedICmpType(A, B, C, PredL);
  unsigned RightType = getMaskedICmpType(A, D, E, PredR);
  return LeftType & RightType;
}
/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y).
static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                     llvm::InstCombiner::BuilderTy &Builder) {
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  unsigned Mask =
      getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
  if (Mask == 0)
    return nullptr;

  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");

  // In full generality:
  //     (icmp (A & B) Op C) | (icmp (A & D) Op E)
  // ==  ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
  //
  // If the latter can be converted into (icmp (A & X) Op Y) then the former is
  // equivalent to (icmp (A & X) !Op Y).
  //
  // Therefore, we can pretend for the rest of this function that we're dealing
  // with the conjunction, provided we flip the sense of any comparisons (both
  // input and output).

  // In most cases we're going to produce an EQ for the "&&" case.
  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  if (!IsAnd) {
    // Convert the masking analysis into its equivalent with negated
    // comparisons.
    Mask = conjugateICmpMask(Mask);
  }

  if (Mask & Mask_AllZeros) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    // -> (icmp eq (A & (B|D)), 0)
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    // We can't use C as zero because we might actually handle
    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // with B and D, having a single bit set.
    Value *Zero = Constant::getNullValue(A->getType());
    return Builder.CreateICmp(NewCC, NewAnd, Zero);
  }
  if (Mask & BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    // -> (icmp eq (A & (B|D)), (B|D))
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr);
  }
  if (Mask & AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    // -> (icmp eq (A & (B&D)), A)
    Value *NewAnd1 = Builder.CreateAnd(B, D);
    Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
    return Builder.CreateICmp(NewCC, NewAnd2, A);
  }

  // Remaining cases assume at least that B and D are constant, and depend on
  // their actual values. This isn't strictly necessary, just a "handle the
  // easy cases for now" decision.
  ConstantInt *BCst = dyn_cast<ConstantInt>(B);
  if (!BCst)
    return nullptr;
  ConstantInt *DCst = dyn_cast<ConstantInt>(D);
  if (!DCst)
    return nullptr;

  if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
    // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
    // Only valid if one of the masks is a superset of the other (check "B&D"
    // is the same as either B or D).
    APInt NewMask = BCst->getValue() & DCst->getValue();

    if (NewMask == BCst->getValue())
      return LHS;
    else if (NewMask == DCst->getValue())
      return RHS;
  }

  if (Mask & AMask_NotAllOnes) {
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
    // Only valid if one of the masks is a superset of the other (check "B|D"
    // is the same as either B or D).
    APInt NewMask = BCst->getValue() | DCst->getValue();

    if (NewMask == BCst->getValue())
      return LHS;
    else if (NewMask == DCst->getValue())
      return RHS;
  }

  if (Mask & BMask_Mixed) {
    // (icmp eq (A & B), C) & (icmp eq (A & D), E)
    // We already know that B & C == C && D & E == E.
    // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
    // C and E, which are shared by both the mask B and the mask D, don't
    // contradict, then we can transform to
    // -> (icmp eq (A & (B|D)), (C|E))
    // Currently, we only handle the case of B, C, D, and E being constant.
    // We can't simply use C and E because we might actually handle
    //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
    // with B and D, having a single bit set.
    ConstantInt *CCst = dyn_cast<ConstantInt>(C);
    if (!CCst)
      return nullptr;
    ConstantInt *ECst = dyn_cast<ConstantInt>(E);
    if (!ECst)
      return nullptr;
    if (PredL != NewCC)
      CCst = cast<ConstantInt>(ConstantExpr::getXor(BCst, CCst));
    if (PredR != NewCC)
      ECst = cast<ConstantInt>(ConstantExpr::getXor(DCst, ECst));

    // If there is a conflict, we should actually return a false for the
    // whole construct.
    if (((BCst->getValue() & DCst->getValue()) &
         (CCst->getValue() ^ ECst->getValue())).getBoolValue())
      return ConstantInt::get(LHS->getType(), !IsAnd);

    Value *NewOr1 = Builder.CreateOr(B, D);
    Value *NewOr2 = ConstantExpr::getOr(CCst, ECst);
    Value *NewAnd = Builder.CreateAnd(A, NewOr1);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr2);
  }

  return nullptr;
}
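// Illustrative sketch of the BMask_AllOnes case (made-up constants):
//   (icmp eq (and i32 %a, 12), 12) & (icmp eq (and i32 %a, 3), 3)
// becomes
//   %and = and i32 %a, 15                 ; 15 == 12 | 3
//   %r   = icmp eq i32 %and, 15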
/// Try to fold a signed range check with lower bound 0 to an unsigned icmp.
/// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
/// If \p Inverted is true then the check is for the inverted range, e.g.
/// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
Value *InstCombiner::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                        bool Inverted) {
  // Check the lower range comparison, e.g. x >= 0
  // InstCombine already ensured that if there is a constant it's on the RHS.
  ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
  if (!RangeStart)
    return nullptr;

  ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() :
                               Cmp0->getPredicate());

  // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
  if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
        (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
    return nullptr;

  ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() :
                               Cmp1->getPredicate());

  Value *Input = Cmp0->getOperand(0);
  Value *RangeEnd;
  if (Cmp1->getOperand(0) == Input) {
    // For the upper range compare we have: icmp x, n
    RangeEnd = Cmp1->getOperand(1);
  } else if (Cmp1->getOperand(1) == Input) {
    // For the upper range compare we have: icmp n, x
    RangeEnd = Cmp1->getOperand(0);
    Pred1 = ICmpInst::getSwappedPredicate(Pred1);
  } else {
    return nullptr;
  }

  // Check the upper range comparison, e.g. x < n
  ICmpInst::Predicate NewPred;
  switch (Pred1) {
    case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
    case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
    default: return nullptr;
  }

  // This simplification is only valid if the upper range is not negative.
  KnownBits Known = computeKnownBits(RangeEnd, /*Depth=*/0, Cmp1);
  if (!Known.isNonNegative())
    return nullptr;

  if (Inverted)
    NewPred = ICmpInst::getInversePredicate(NewPred);

  return Builder.CreateICmp(NewPred, Input, RangeEnd);
}
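// Illustrative: when %n is known non-negative,
//   (icmp sge i32 %x, 0) & (icmp slt i32 %x, %n) --> icmp ult i32 %x, %n
// since a negative %x reinterpreted as unsigned is huge and fails the ult.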
static Value *
foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS,
                                     bool JoinedByAnd,
                                     InstCombiner::BuilderTy &Builder) {
  Value *X = LHS->getOperand(0);
  if (X != RHS->getOperand(0))
    return nullptr;

  const APInt *C1, *C2;
  if (!match(LHS->getOperand(1), m_APInt(C1)) ||
      !match(RHS->getOperand(1), m_APInt(C2)))
    return nullptr;

  // We only handle (X != C1 && X != C2) and (X == C1 || X == C2).
  ICmpInst::Predicate Pred = LHS->getPredicate();
  if (Pred != RHS->getPredicate())
    return nullptr;
  if (JoinedByAnd && Pred != ICmpInst::ICMP_NE)
    return nullptr;
  if (!JoinedByAnd && Pred != ICmpInst::ICMP_EQ)
    return nullptr;

  // The larger unsigned constant goes on the right.
  if (C1->ugt(*C2))
    std::swap(C1, C2);

  APInt Xor = *C1 ^ *C2;
  if (Xor.isPowerOf2()) {
    // If LHSC and RHSC differ by only one bit, then set that bit in X and
    // compare against the larger constant:
    // (X == C1 || X == C2) --> (X | (C1 ^ C2)) == C2
    // (X != C1 && X != C2) --> (X | (C1 ^ C2)) != C2
    // We choose an 'or' with a Pow2 constant rather than the inverse mask with
    // 'and' because that may lead to smaller codegen from a smaller constant.
    Value *Or = Builder.CreateOr(X, ConstantInt::get(X->getType(), Xor));
    return Builder.CreateICmp(Pred, Or, ConstantInt::get(X->getType(), *C2));
  }

  // Special case: get the ordering right when the values wrap around zero.
  // I.e., we assumed the constants were unsigned when swapping earlier.
  if (C1->isNullValue() && C2->isAllOnesValue())
    std::swap(C1, C2);

  if (*C1 == *C2 - 1) {
    // (X == 13 || X == 14) --> X - 13 u<= 1
    // (X != 13 && X != 14) --> X - 13  u> 1
    // An 'add' is the canonical IR form, so favor that over a 'sub'.
    Value *Add = Builder.CreateAdd(X, ConstantInt::get(X->getType(), -(*C1)));
    auto NewPred = JoinedByAnd ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE;
    return Builder.CreateICmp(NewPred, Add, ConstantInt::get(X->getType(), 1));
  }

  return nullptr;
}
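// Worked instance of the power-of-2 branch above (made-up constants):
// C1 == 8 and C2 == 12 differ only in bit 2 (8 ^ 12 == 4), so
//   (X == 8 || X == 12) --> ((X | 4) == 12)
// and the adjacent-constant branch gives, e.g.,
//   (X == 13 || X == 14) --> ((X + -13) u<= 1).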
// Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
// Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
Value *InstCombiner::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
                                                   bool JoinedByAnd,
                                                   Instruction &CxtI) {
  ICmpInst::Predicate Pred = LHS->getPredicate();
  if (Pred != RHS->getPredicate())
    return nullptr;
  if (JoinedByAnd && Pred != ICmpInst::ICMP_NE)
    return nullptr;
  if (!JoinedByAnd && Pred != ICmpInst::ICMP_EQ)
    return nullptr;

  // TODO: Support vector splats.
  ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
  ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
  if (!LHSC || !RHSC || !LHSC->isZero() || !RHSC->isZero())
    return nullptr;

  Value *A, *B, *C, *D;
  if (match(LHS->getOperand(0), m_And(m_Value(A), m_Value(B))) &&
      match(RHS->getOperand(0), m_And(m_Value(C), m_Value(D)))) {
    if (A == D || B == D)
      std::swap(C, D);
    if (B == C)
      std::swap(A, B);

    if (A == C &&
        isKnownToBeAPowerOfTwo(B, false, 0, &CxtI) &&
        isKnownToBeAPowerOfTwo(D, false, 0, &CxtI)) {
      Value *Mask = Builder.CreateOr(B, D);
      Value *Masked = Builder.CreateAnd(A, Mask);
      auto NewPred = JoinedByAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
      return Builder.CreateICmp(NewPred, Masked, Mask);
    }
  }

  return nullptr;
}
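// Illustrative sketch (assumes %a is i8 and the masks 1 and 2 are known
// powers of two):
//   (icmp ne (and i8 %a, 1), 0) & (icmp ne (and i8 %a, 2), 0)
// becomes
//   %masked = and i8 %a, 3                ; 3 == 1 | 2
//   %r      = icmp eq i8 %masked, 3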
/// Fold (icmp)&(icmp) if possible.
Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
                                    Instruction &CxtI) {
  // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
  // if K1 and K2 are a one-bit mask.
  if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, true, CxtI))
    return V;

  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();

  // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
  if (PredicatesFoldable(PredL, PredR)) {
    if (LHS->getOperand(0) == RHS->getOperand(1) &&
        LHS->getOperand(1) == RHS->getOperand(0))
      LHS->swapOperands();
    if (LHS->getOperand(0) == RHS->getOperand(0) &&
        LHS->getOperand(1) == RHS->getOperand(1)) {
      Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
      unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
      bool isSigned = LHS->isSigned() || RHS->isSigned();
      return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
    }
  }

  // handle (roughly): (icmp eq (A & B), C) & (icmp eq (A & D), E)
  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, true, Builder))
    return V;

  // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
  if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/false))
    return V;

  // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
  if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/false))
    return V;

  if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, true, Builder))
    return V;

  // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
  Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
  ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
  ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
  if (!LHSC || !RHSC)
    return nullptr;

  if (LHSC == RHSC && PredL == PredR) {
    // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
    // where C is a power of 2, or
    // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
    if ((PredL == ICmpInst::ICMP_ULT && LHSC->getValue().isPowerOf2()) ||
        (PredL == ICmpInst::ICMP_EQ && LHSC->isZero())) {
      Value *NewOr = Builder.CreateOr(LHS0, RHS0);
      return Builder.CreateICmp(PredL, NewOr, LHSC);
    }
  }

  // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
  // where CMAX is the all ones value for the truncated type,
  // iff the lower bits of C2 and CA are zero.
  if (PredL == ICmpInst::ICMP_EQ && PredL == PredR && LHS->hasOneUse() &&
      RHS->hasOneUse()) {
    Value *V;
    ConstantInt *AndC, *SmallC = nullptr, *BigC = nullptr;

    // (trunc x) == C1 & (and x, CA) == C2
    // (and x, CA) == C2 & (trunc x) == C1
    if (match(RHS0, m_Trunc(m_Value(V))) &&
        match(LHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) {
      SmallC = RHSC;
      BigC = LHSC;
    } else if (match(LHS0, m_Trunc(m_Value(V))) &&
               match(RHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) {
      SmallC = LHSC;
      BigC = RHSC;
    }

    if (SmallC && BigC) {
      unsigned BigBitSize = BigC->getType()->getBitWidth();
      unsigned SmallBitSize = SmallC->getType()->getBitWidth();

      // Check that the low bits are zero.
      APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
      if ((Low & AndC->getValue()).isNullValue() &&
          (Low & BigC->getValue()).isNullValue()) {
        Value *NewAnd = Builder.CreateAnd(V, Low | AndC->getValue());
        APInt N = SmallC->getValue().zext(BigBitSize) | BigC->getValue();
        Value *NewVal = ConstantInt::get(AndC->getType()->getContext(), N);
        return Builder.CreateICmp(PredL, NewAnd, NewVal);
      }
    }
  }

  // From here on, we only handle:
  // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
  if (LHS0 != RHS0)
    return nullptr;

  // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere.
  if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE ||
      PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE ||
      PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE ||
      PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE)
    return nullptr;

  // We can't fold (ugt x, C) & (sgt x, C2).
  if (!PredicatesFoldable(PredL, PredR))
    return nullptr;

  // Ensure that the larger constant is on the RHS.
  bool ShouldSwap;
  if (CmpInst::isSigned(PredL) ||
      (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR)))
    ShouldSwap = LHSC->getValue().sgt(RHSC->getValue());
  else
    ShouldSwap = LHSC->getValue().ugt(RHSC->getValue());

  if (ShouldSwap) {
    std::swap(LHS, RHS);
    std::swap(LHSC, RHSC);
    std::swap(PredL, PredR);
  }

  // At this point, we know we have two icmp instructions
  // comparing a value against two constants and and'ing the result
  // together. Because of the above check, we know that we only have
  // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
  // (from the icmp folding check above) that the two constants
  // are not equal and that the larger constant is on the RHS.
  assert(LHSC != RHSC && "Compares not folded above?");

  switch (PredL) {
  default:
    llvm_unreachable("Unknown integer condition code!");
  case ICmpInst::ICMP_NE:
    switch (PredR) {
    default:
      llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_ULT:
      if (LHSC == SubOne(RHSC)) // (X != 13 & X u< 14) -> X u< 13
        return Builder.CreateICmpULT(LHS0, LHSC);
      if (LHSC->isZero()) // (X != 0 & X u< 14) -> X-1 u< 13
        return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
                               false, true);
      break; // (X != 13 & X u< 15) -> no change
    case ICmpInst::ICMP_SLT:
      if (LHSC == SubOne(RHSC)) // (X != 13 & X s< 14) -> X s< 13
        return Builder.CreateICmpSLT(LHS0, LHSC);
      break; // (X != 13 & X s< 15) -> no change
    case ICmpInst::ICMP_NE:
      // Potential folds for this case should already be handled.
      break;
    }
    break;
  case ICmpInst::ICMP_UGT:
    switch (PredR) {
    default:
      llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_NE:
      if (RHSC == AddOne(LHSC)) // (X u> 13 & X != 14) -> X u> 14
        return Builder.CreateICmp(PredL, LHS0, RHSC);
      break; // (X u> 13 & X != 15) -> no change
    case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) u< 1
      return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
                             false, true);
    }
    break;
  case ICmpInst::ICMP_SGT:
    switch (PredR) {
    default:
      llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_NE:
      if (RHSC == AddOne(LHSC)) // (X s> 13 & X != 14) -> X s> 14
        return Builder.CreateICmp(PredL, LHS0, RHSC);
      break; // (X s> 13 & X != 15) -> no change
    case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
      return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), true,
                             true);
    }
    break;
  }

  return nullptr;
}
Value *InstCombiner::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS,
                                      bool IsAnd) {
  Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
  Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
  FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();

  if (LHS0 == RHS1 && RHS0 == LHS1) {
    // Swap RHS operands to match LHS.
    PredR = FCmpInst::getSwappedPredicate(PredR);
    std::swap(RHS0, RHS1);
  }

  // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
  // Suppose the relation between x and y is R, where R is one of
  // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
  // testing the desired relations.
  //
  // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
  //    bool(R & CC0) && bool(R & CC1)
  //  = bool((R & CC0) & (R & CC1))
  //  = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency
  //
  // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
  //    bool(R & CC0) || bool(R & CC1)
  //  = bool((R & CC0) | (R & CC1))
  //  = bool(R & (CC0 | CC1)) <= by distributivity, applied in reverse
  if (LHS0 == RHS0 && LHS1 == RHS1) {
    unsigned FCmpCodeL = getFCmpCode(PredL);
    unsigned FCmpCodeR = getFCmpCode(PredR);
    unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;
    return getFCmpValue(NewPred, LHS0, LHS1, Builder);
  }

  if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
      (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
    if (LHS0->getType() != RHS0->getType())
      return nullptr;

    // FCmp canonicalization ensures that (fcmp ord/uno X, X) and
    // (fcmp ord/uno X, C) will be transformed to (fcmp X, 0.0).
    if (match(LHS1, m_Zero()) && LHS1 == RHS1)
      // Ignore the constants because they are obviously not NANs:
      // (fcmp ord x, 0.0) & (fcmp ord y, 0.0) -> (fcmp ord x, y)
      // (fcmp uno x, 0.0) | (fcmp uno y, 0.0) -> (fcmp uno x, y)
      return Builder.CreateFCmp(PredL, LHS0, RHS0);
  }

  return nullptr;
}
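// Worked example of the code-mask algebra above (illustrative):
//   (fcmp olt x, y) & (fcmp ogt x, y): 0100 & 0010 == 0000 --> false
//   (fcmp olt x, y) | (fcmp ogt x, y): 0100 | 0010 == 0110 --> fcmp one x, y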
/// Match De Morgan's Laws:
///   (~A & ~B) == (~(A | B))
///   (~A | ~B) == (~(A & B))
static Instruction *matchDeMorgansLaws(BinaryOperator &I,
                                       InstCombiner::BuilderTy &Builder) {
  auto Opcode = I.getOpcode();
  assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
         "Trying to match De Morgan's Laws with something other than and/or");

  // Flip the logic operation.
  Opcode = (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;

  Value *A, *B;
  if (match(I.getOperand(0), m_OneUse(m_Not(m_Value(A)))) &&
      match(I.getOperand(1), m_OneUse(m_Not(m_Value(B)))) &&
      !IsFreeToInvert(A, A->hasOneUse()) &&
      !IsFreeToInvert(B, B->hasOneUse())) {
    Value *AndOr = Builder.CreateBinOp(Opcode, A, B, I.getName() + ".demorgan");
    return BinaryOperator::CreateNot(AndOr);
  }

  return nullptr;
}
bool InstCombiner::shouldOptimizeCast(CastInst *CI) {
  Value *CastSrc = CI->getOperand(0);

  // Noop casts and casts of constants should be eliminated trivially.
  if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(CastSrc))
    return false;

  // If this cast is paired with another cast that can be eliminated, we prefer
  // to have it eliminated.
  if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
    if (isEliminableCastPair(PrecedingCI, CI))
      return false;

  return true;
}
/// Fold {and,or,xor} (cast X), C.
static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
                                          InstCombiner::BuilderTy &Builder) {
  Constant *C = dyn_cast<Constant>(Logic.getOperand(1));
  if (!C)
    return nullptr;

  auto LogicOpc = Logic.getOpcode();
  Type *DestTy = Logic.getType();
  Type *SrcTy = Cast->getSrcTy();

  // Move the logic operation ahead of a zext or sext if the constant is
  // unchanged in the smaller source type. Performing the logic in a smaller
  // type may provide more information to later folds, and the smaller logic
  // instruction may be cheaper (particularly in the case of vectors).
  Value *X;
  if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) {
    Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
    Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy);
    if (ZextTruncC == C) {
      // LogicOpc (zext X), C --> zext (LogicOpc X, C)
      Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
      return new ZExtInst(NewOp, DestTy);
    }
  }

  if (match(Cast, m_OneUse(m_SExt(m_Value(X))))) {
    Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
    Constant *SextTruncC = ConstantExpr::getSExt(TruncC, DestTy);
    if (SextTruncC == C) {
      // LogicOpc (sext X), C --> sext (LogicOpc X, C)
      Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
      return new SExtInst(NewOp, DestTy);
    }
  }

  return nullptr;
}
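// Illustrative: and (zext i8 %x to i32), 7 --> zext (and i8 %x, 7) to i32,
// since truncating the constant 7 to i8 and zero-extending it back leaves it
// unchanged.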
/// Fold {and,or,xor} (cast X), Y.
Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) {
  auto LogicOpc = I.getOpcode();
  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  CastInst *Cast0 = dyn_cast<CastInst>(Op0);
  if (!Cast0)
    return nullptr;

  // This must be a cast from an integer or integer vector source type to allow
  // transformation of the logic operation to the source type.
  Type *DestTy = I.getType();
  Type *SrcTy = Cast0->getSrcTy();
  if (!SrcTy->isIntOrIntVectorTy())
    return nullptr;

  if (Instruction *Ret = foldLogicCastConstant(I, Cast0, Builder))
    return Ret;

  CastInst *Cast1 = dyn_cast<CastInst>(Op1);
  if (!Cast1)
    return nullptr;

  // Both operands of the logic operation are casts. The casts must be of the
  // same type for reduction.
  auto CastOpcode = Cast0->getOpcode();
  if (CastOpcode != Cast1->getOpcode() || SrcTy != Cast1->getSrcTy())
    return nullptr;

  Value *Cast0Src = Cast0->getOperand(0);
  Value *Cast1Src = Cast1->getOperand(0);

  // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
  if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
    Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
                                       I.getName());
    return CastInst::Create(CastOpcode, NewOp, DestTy);
  }

  // For now, only 'and'/'or' have optimizations after this.
  if (LogicOpc == Instruction::Xor)
    return nullptr;

  // If this is logic(cast(icmp), cast(icmp)), try to fold this even if the
  // cast is otherwise not optimizable. This happens for vector sexts.
  ICmpInst *ICmp0 = dyn_cast<ICmpInst>(Cast0Src);
  ICmpInst *ICmp1 = dyn_cast<ICmpInst>(Cast1Src);
  if (ICmp0 && ICmp1) {
    Value *Res = LogicOpc == Instruction::And ? foldAndOfICmps(ICmp0, ICmp1, I)
                                              : foldOrOfICmps(ICmp0, ICmp1, I);
    if (Res)
      return CastInst::Create(CastOpcode, Res, DestTy);
    return nullptr;
  }

  // If this is logic(cast(fcmp), cast(fcmp)), try to fold this even if the
  // cast is otherwise not optimizable. This happens for vector sexts.
  FCmpInst *FCmp0 = dyn_cast<FCmpInst>(Cast0Src);
  FCmpInst *FCmp1 = dyn_cast<FCmpInst>(Cast1Src);
  if (FCmp0 && FCmp1)
    if (Value *R = foldLogicOfFCmps(FCmp0, FCmp1, LogicOpc == Instruction::And))
      return CastInst::Create(CastOpcode, R, DestTy);

  return nullptr;
}
static Instruction *foldAndToXor(BinaryOperator &I,
                                 InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::And);
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *A, *B;

  // Operand complexity canonicalization guarantees that the 'or' is Op0.
  // (A | B) & ~(A & B) --> A ^ B
  // (A | B) & ~(B & A) --> A ^ B
  if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
      match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B)))))
    return BinaryOperator::CreateXor(A, B);

  // (A | ~B) & (~A | B) --> ~(A ^ B)
  // (A | ~B) & (B | ~A) --> ~(A ^ B)
  // (~B | A) & (~A | B) --> ~(A ^ B)
  // (~B | A) & (B | ~A) --> ~(A ^ B)
  if (Op0->hasOneUse() || Op1->hasOneUse())
    if (match(Op0, m_c_Or(m_Value(A), m_Not(m_Value(B)))) &&
        match(Op1, m_c_Or(m_Not(m_Specific(A)), m_Specific(B))))
      return BinaryOperator::CreateNot(Builder.CreateXor(A, B));

  return nullptr;
}
static Instruction *foldOrToXor(BinaryOperator &I,
                                InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::Or);
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *A, *B;

  // Operand complexity canonicalization guarantees that the 'and' is Op0.
  // (A & B) | ~(A | B) --> ~(A ^ B)
  // (A & B) | ~(B | A) --> ~(A ^ B)
  if (Op0->hasOneUse() || Op1->hasOneUse())
    if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
        match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return BinaryOperator::CreateNot(Builder.CreateXor(A, B));

  // (A & ~B) | (~A & B) --> A ^ B
  // (A & ~B) | (B & ~A) --> A ^ B
  // (~B & A) | (~A & B) --> A ^ B
  // (~B & A) | (B & ~A) --> A ^ B
  if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
      match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))
    return BinaryOperator::CreateXor(A, B);

  return nullptr;
}
// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyAndInst(Op0, Op1, SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // Do this before using distributive laws to catch simple and/or/not patterns.
  if (Instruction *Xor = foldAndToXor(I, Builder))
    return Xor;

  // (A|B)&(A|C) -> A|(B&C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyBSwap(I, Builder))
    return replaceInstUsesWith(I, V);

  const APInt *C;
  if (match(Op1, m_APInt(C))) {
    Value *X, *Y;
    if (match(Op0, m_OneUse(m_LogicalShift(m_One(), m_Value(X)))) &&
        C->isOneValue()) {
      // (1 << X) & 1 --> zext(X == 0)
      // (1 >> X) & 1 --> zext(X == 0)
      Value *IsZero = Builder.CreateICmpEQ(X, ConstantInt::get(I.getType(), 0));
      return new ZExtInst(IsZero, I.getType());
    }

    const APInt *XorC;
    if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_APInt(XorC))))) {
      // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
      Constant *NewC = ConstantInt::get(I.getType(), *C & *XorC);
      Value *And = Builder.CreateAnd(X, Op1);
      And->takeName(Op0);
      return BinaryOperator::CreateXor(And, NewC);
    }

    const APInt *OrC;
    if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) {
      // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2)
      // NOTE: This reduces the number of bits set in the & mask, which
      // can expose opportunities for store narrowing for scalars.
      // NOTE: SimplifyDemandedBits should have already removed bits from C1
      // that aren't set in C2. Meaning we can replace (C1&C2) with C1 in
      // the above, but this feels safer.
      APInt Together = *C & *OrC;
      Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(),
                                                         Together ^ *C));
      And->takeName(Op0);
      return BinaryOperator::CreateOr(And, ConstantInt::get(I.getType(),
                                                            Together));
    }
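    // Worked instance of the fold above (made-up constants): C1 == 5 and
    // C2 == 12 give C1&C2 == 4 and C2^(C1&C2) == 8, so
    //   ((X | 5) & 12) --> ((X & 8) | 4).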
    // If the mask is only needed on one incoming arm, push the 'and' op up.
    if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) ||
        match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
      APInt NotAndMask(~(*C));
      BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode();
      if (MaskedValueIsZero(X, NotAndMask, 0, &I)) {
        // Not masking anything out for the LHS, move mask to RHS.
        // and ({x}or X, Y), C --> {x}or X, (and Y, C)
        Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked");
        return BinaryOperator::Create(BinOp, X, NewRHS);
      }
      if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, 0, &I)) {
        // Not masking anything out for the RHS, move mask to LHS.
        // and ({x}or X, Y), C --> {x}or (and X, C), Y
        Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked");
        return BinaryOperator::Create(BinOp, NewLHS, Y);
      }
    }
  }

  if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
    const APInt &AndRHSMask = AndRHS->getValue();

    // Optimize a variety of ((val OP C1) & C2) combinations...
    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
      // ((C1 OP zext(X)) & C2) -> zext((C1 OP X) & C2) if C2 fits in the
      // bitwidth of X and OP behaves well when given trunc(C1) and X.
      switch (Op0I->getOpcode()) {
      default:
        break;
      case Instruction::Xor:
      case Instruction::Or:
      case Instruction::Mul:
      case Instruction::Add:
      case Instruction::Sub:
        Value *X;
        ConstantInt *C1;
        if (match(Op0I, m_c_BinOp(m_ZExt(m_Value(X)), m_ConstantInt(C1)))) {
          if (AndRHSMask.isIntN(X->getType()->getScalarSizeInBits())) {
            auto *TruncC1 = ConstantExpr::getTrunc(C1, X->getType());
            Value *BinOp;
            Value *Op0LHS = Op0I->getOperand(0);
            if (isa<ZExtInst>(Op0LHS))
              BinOp = Builder.CreateBinOp(Op0I->getOpcode(), X, TruncC1);
            else
              BinOp = Builder.CreateBinOp(Op0I->getOpcode(), TruncC1, X);
            auto *TruncC2 = ConstantExpr::getTrunc(AndRHS, X->getType());
            auto *And = Builder.CreateAnd(BinOp, TruncC2);
            return new ZExtInst(And, I.getType());
          }
        }
      }

      if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
        if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
          return Res;
    }

    // If this is an integer truncation, and if the source is an 'and' with
    // immediate, transform it. This frequently occurs for bitfield accesses.
    {
      Value *X = nullptr; ConstantInt *YC = nullptr;
      if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) {
        // Change: and (trunc (and X, YC) to T), C2
        // into  : and (trunc X to T), trunc(YC) & C2
        // This will fold the two constants together, which may allow
        // other simplifications.
        Value *NewCast = Builder.CreateTrunc(X, I.getType(), "and.shrunk");
        Constant *C3 = ConstantExpr::getTrunc(YC, I.getType());
        C3 = ConstantExpr::getAnd(C3, AndRHS);
        return BinaryOperator::CreateAnd(NewCast, C3);
      }
    }
  }

  if (isa<Constant>(Op1))
    if (Instruction *FoldedLogic = foldOpWithConstantIntoOperand(I))
      return FoldedLogic;

  if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
    return DeMorgan;

  {
    Value *A = nullptr, *B = nullptr, *C = nullptr;
    // A&(A^B) => A & ~B
    {
      Value *tmpOp0 = Op0;
      Value *tmpOp1 = Op1;
      if (match(Op0, m_OneUse(m_Xor(m_Value(A), m_Value(B))))) {
        if (A == Op1 || B == Op1) {
          tmpOp1 = Op0;
          tmpOp0 = Op1;
          // Simplify below.
        }
      }

      if (match(tmpOp1, m_OneUse(m_Xor(m_Value(A), m_Value(B))))) {
        if (B == tmpOp0) {
          std::swap(A, B);
        }
        // Notice that the pattern (A&(~B)) is actually (A&(-1^B)), so if
        // A is originally -1 (or a vector of -1 and undefs), then we enter
        // an endless loop. By checking that A is non-constant we ensure that
        // we will never get to the loop.
        if (A == tmpOp0 && !isa<Constant>(A)) // A&(A^B) -> A & ~B
          return BinaryOperator::CreateAnd(A, Builder.CreateNot(B));
      }
    }

    // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
    if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
      if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
        if (Op1->hasOneUse() || IsFreeToInvert(C, C->hasOneUse()))
          return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(C));

    // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
    if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
      if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
        if (Op0->hasOneUse() || IsFreeToInvert(C, C->hasOneUse()))
          return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C));

    // (A | B) & ((~A) ^ B) -> (A & B)
    // (A | B) & (B ^ (~A)) -> (A & B)
    // (B | A) & ((~A) ^ B) -> (A & B)
    // (B | A) & (B ^ (~A)) -> (A & B)
    if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
        match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);

    // ((~A) ^ B) & (A | B) -> (A & B)
    // ((~A) ^ B) & (B | A) -> (A & B)
    // (B ^ (~A)) & (A | B) -> (A & B)
    // (B ^ (~A)) & (B | A) -> (A & B)
    if (match(Op0, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
        match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);
  }

  {
    ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
    ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
    if (LHS && RHS)
      if (Value *Res = foldAndOfICmps(LHS, RHS, I))
        return replaceInstUsesWith(I, Res);

    // TODO: Make this recursive; it's a little tricky because an arbitrary
    // number of 'and' instructions might have to be created.
    Value *X, *Y;
    if (LHS && match(Op1, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
      if (auto *Cmp = dyn_cast<ICmpInst>(X))
        if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
          return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
      if (auto *Cmp = dyn_cast<ICmpInst>(Y))
        if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
          return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
    }
    if (RHS && match(Op0, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
      if (auto *Cmp = dyn_cast<ICmpInst>(X))
        if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
          return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
      if (auto *Cmp = dyn_cast<ICmpInst>(Y))
        if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
          return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
    }
  }

  if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
    if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
      if (Value *Res = foldLogicOfFCmps(LHS, RHS, true))
        return replaceInstUsesWith(I, Res);

  if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
    return CastedAnd;

  // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>.
  Value *A;
  if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) &&
      A->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(A, Op1, Constant::getNullValue(I.getType()));
  if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) &&
      A->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(A, Op0, Constant::getNullValue(I.getType()));

  return Changed ? &I : nullptr;
}
/// Given an OR instruction, check to see if this is a bswap idiom. If so,
/// insert the new intrinsic and return it.
Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // Look through zero extends.
  if (Instruction *Ext = dyn_cast<ZExtInst>(Op0))
    Op0 = Ext->getOperand(0);

  if (Instruction *Ext = dyn_cast<ZExtInst>(Op1))
    Op1 = Ext->getOperand(0);

  // (A | B) | C and A | (B | C)                 -> bswap if possible.
  bool OrOfOrs = match(Op0, m_Or(m_Value(), m_Value())) ||
                 match(Op1, m_Or(m_Value(), m_Value()));

  // (A >> B) | (C << D) and (A << B) | (C >> D) -> bswap if possible.
  bool OrOfShifts = match(Op0, m_LogicalShift(m_Value(), m_Value())) &&
                    match(Op1, m_LogicalShift(m_Value(), m_Value()));

  // (A & B) | (C & D)                           -> bswap if possible.
  bool OrOfAnds = match(Op0, m_And(m_Value(), m_Value())) &&
                  match(Op1, m_And(m_Value(), m_Value()));

  if (!OrOfOrs && !OrOfShifts && !OrOfAnds)
    return nullptr;

  SmallVector<Instruction*, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, true, false, Insts))
    return nullptr;
  Instruction *LastInst = Insts.pop_back_val();
  LastInst->removeFromParent();

  for (auto *Inst : Insts)
    Worklist.Add(Inst);
  return LastInst;
}
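// A classic 16-bit instance of the idiom (illustrative):
//   %hi = shl i16 %x, 8
//   %lo = lshr i16 %x, 8
//   %r  = or i16 %hi, %lo    ; recognized as call i16 @llvm.bswap.i16(i16 %x)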
/// If all elements of two constant vectors are 0/-1 and inverses, return true.
static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
  unsigned NumElts = C1->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *EltC1 = C1->getAggregateElement(i);
    Constant *EltC2 = C2->getAggregateElement(i);
    if (!EltC1 || !EltC2)
      return false;

    // One element must be all ones, and the other must be all zeros.
    // FIXME: Allow undef elements.
    if (!((match(EltC1, m_Zero()) && match(EltC2, m_AllOnes())) ||
          (match(EltC2, m_Zero()) && match(EltC1, m_AllOnes()))))
      return false;
  }
  return true;
}
/// We have an expression of the form (A & C) | (B & D). If A is a scalar or
/// vector composed of all-zeros or all-ones values and is the bitwise 'not' of
/// B, it can be used as the condition operand of a select instruction.
static Value *getSelectCondition(Value *A, Value *B,
                                 InstCombiner::BuilderTy &Builder) {
  // If these are scalars or vectors of i1, A can be used directly.
  Type *Ty = A->getType();
  if (match(A, m_Not(m_Specific(B))) && Ty->isIntOrIntVectorTy(1))
    return A;

  // If A and B are sign-extended, look through the sexts to find the booleans.
  Value *Cond;
  Value *NotB;
  if (match(A, m_SExt(m_Value(Cond))) &&
      Cond->getType()->isIntOrIntVectorTy(1) &&
      match(B, m_OneUse(m_Not(m_Value(NotB))))) {
    NotB = peekThroughBitcast(NotB, true);
    if (match(NotB, m_SExt(m_Specific(Cond))))
      return Cond;
  }

  // All scalar (and most vector) possibilities should be handled now.
  // Try more matches that only apply to non-splat constant vectors.
  if (!Ty->isVectorTy())
    return nullptr;

  // If both operands are constants, see if the constants are inverse bitmasks.
  Constant *AC, *BC;
  if (match(A, m_Constant(AC)) && match(B, m_Constant(BC)) &&
      areInverseVectorBitmasks(AC, BC)) {
    return Builder.CreateZExtOrTrunc(AC, CmpInst::makeCmpResultType(Ty));
  }

  // If both operands are xor'd with constants using the same sexted boolean
  // operand, see if the constants are inverse bitmasks.
  if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AC)))) &&
      match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BC)))) &&
      Cond->getType()->isIntOrIntVectorTy(1) &&
      areInverseVectorBitmasks(AC, BC)) {
    AC = ConstantExpr::getTrunc(AC, CmpInst::makeCmpResultType(Ty));
    return Builder.CreateXor(Cond, AC);
  }

  return nullptr;
}
/// We have an expression of the form (A & C) | (B & D). Try to simplify this
/// to "A' ? C : D", where A' is a boolean or vector of booleans.
static Value *matchSelectFromAndOr(Value *A, Value *C, Value *B, Value *D,
                                   InstCombiner::BuilderTy &Builder) {
  // The potential condition of the select may be bitcasted. In that case, look
  // through its bitcast and the corresponding bitcast of the 'not' condition.
  Type *OrigType = A->getType();
  A = peekThroughBitcast(A, true);
  B = peekThroughBitcast(B, true);

  if (Value *Cond = getSelectCondition(A, B, Builder)) {
    // ((bc Cond) & C) | ((bc ~Cond) & D) --> bc (select Cond, (bc C), (bc D))
    // The bitcasts will either all exist or all not exist. The builder will
    // not create unnecessary casts if the types already match.
    Value *BitcastC = Builder.CreateBitCast(C, A->getType());
    Value *BitcastD = Builder.CreateBitCast(D, A->getType());
    Value *Select = Builder.CreateSelect(Cond, BitcastC, BitcastD);
    return Builder.CreateBitCast(Select, OrigType);
  }

  return nullptr;
}
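// Illustrative sketch: if %c is a <4 x i1> condition, A == sext(%c), and
// B == ~A, then
//   (A & C) | (B & D) --> select <4 x i1> %c, <4 x i32> C, <4 x i32> D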
/// Fold (icmp)|(icmp) if possible.
Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
                                   Instruction &CxtI) {
  // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
  // if K1 and K2 are a one-bit mask.
  if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, false, CxtI))
    return V;

  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();

  ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
  ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));

  // Fold (icmp ult/ule (A + C1), C3) | (icmp ult/ule (A + C2), C3)
  //                  --> (icmp ult/ule ((A & ~(C1 ^ C2)) + max(C1, C2)), C3)
  // The original condition actually refers to the following two ranges:
  // [MAX_UINT-C1+1, MAX_UINT-C1+1+C3] and [MAX_UINT-C2+1, MAX_UINT-C2+1+C3]
  // We can fold these two ranges if:
  // 1) C1 and C2 are unsigned greater than C3.
  // 2) The two ranges are separated.
  // 3) C1 ^ C2 is a one-bit mask.
  // 4) LowRange1 ^ LowRange2 and HighRange1 ^ HighRange2 are one-bit masks.
  // This implies all values in the two ranges differ by exactly one bit.
  if ((PredL == ICmpInst::ICMP_ULT || PredL == ICmpInst::ICMP_ULE) &&
      PredL == PredR && LHSC && RHSC && LHS->hasOneUse() && RHS->hasOneUse() &&
      LHSC->getType() == RHSC->getType() &&
      LHSC->getValue() == (RHSC->getValue())) {

    Value *LAdd = LHS->getOperand(0);
    Value *RAdd = RHS->getOperand(0);

    Value *LAddOpnd, *RAddOpnd;
    ConstantInt *LAddC, *RAddC;
    if (match(LAdd, m_Add(m_Value(LAddOpnd), m_ConstantInt(LAddC))) &&
        match(RAdd, m_Add(m_Value(RAddOpnd), m_ConstantInt(RAddC))) &&
        LAddC->getValue().ugt(LHSC->getValue()) &&
        RAddC->getValue().ugt(LHSC->getValue())) {

      APInt DiffC = LAddC->getValue() ^ RAddC->getValue();
      if (LAddOpnd == RAddOpnd && DiffC.isPowerOf2()) {
        ConstantInt *MaxAddC = nullptr;
        if (LAddC->getValue().ult(RAddC->getValue()))
          MaxAddC = RAddC;
        else
          MaxAddC = LAddC;

        APInt RRangeLow = -RAddC->getValue();
        APInt RRangeHigh = RRangeLow + LHSC->getValue();
        APInt LRangeLow = -LAddC->getValue();
        APInt LRangeHigh = LRangeLow + LHSC->getValue();
        APInt LowRangeDiff = RRangeLow ^ LRangeLow;
        APInt HighRangeDiff = RRangeHigh ^ LRangeHigh;
        APInt RangeDiff = LRangeLow.sgt(RRangeLow) ? LRangeLow - RRangeLow
                                                   : RRangeLow - LRangeLow;

        if (LowRangeDiff.isPowerOf2() && LowRangeDiff == HighRangeDiff &&
            RangeDiff.ugt(LHSC->getValue())) {
          Value *MaskC = ConstantInt::get(LAddC->getType(), ~DiffC);

          Value *NewAnd = Builder.CreateAnd(LAddOpnd, MaskC);
          Value *NewAdd = Builder.CreateAdd(NewAnd, MaxAddC);
          return Builder.CreateICmp(LHS->getPredicate(), NewAdd, LHSC);
        }
      }
    }
  }
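  // Worked instance of the fold above (made-up constants): C1 == 16,
  // C2 == 48, C3 == 8. C1 ^ C2 == 32 is a single bit, so
  //   (icmp ult (A + 16), 8) | (icmp ult (A + 48), 8)
  // --> (icmp ult ((A & ~32) + 48), 8)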
1595 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
1596 if (PredicatesFoldable(PredL, PredR)) {
1597 if (LHS->getOperand(0) == RHS->getOperand(1) &&
1598 LHS->getOperand(1) == RHS->getOperand(0))
1599 LHS->swapOperands();
1600 if (LHS->getOperand(0) == RHS->getOperand(0) &&
1601 LHS->getOperand(1) == RHS->getOperand(1)) {
1602 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
1603 unsigned Code = getICmpCode(LHS) | getICmpCode(RHS);
1604 bool isSigned = LHS->isSigned() || RHS->isSigned();
1605 return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
1609 // handle (roughly):
1610 // (icmp ne (A & B), C) | (icmp ne (A & D), E)
1611 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, false, Builder))
  Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
  if (LHS->hasOneUse() || RHS->hasOneUse()) {
    // (icmp eq B, 0) | (icmp ult A, B) -> (icmp ule A, B-1)
    // (icmp eq B, 0) | (icmp ugt B, A) -> (icmp ule A, B-1)
    Value *A = nullptr, *B = nullptr;
    if (PredL == ICmpInst::ICMP_EQ && LHSC && LHSC->isZero()) {
      B = LHS0;
      if (PredR == ICmpInst::ICMP_ULT && LHS0 == RHS->getOperand(1))
        A = RHS0;
      else if (PredR == ICmpInst::ICMP_UGT && LHS0 == RHS0)
        A = RHS->getOperand(1);
    }
    // (icmp ult A, B) | (icmp eq B, 0) -> (icmp ule A, B-1)
    // (icmp ugt B, A) | (icmp eq B, 0) -> (icmp ule A, B-1)
    else if (PredR == ICmpInst::ICMP_EQ && RHSC && RHSC->isZero()) {
      B = RHS0;
      if (PredL == ICmpInst::ICMP_ULT && RHS0 == LHS->getOperand(1))
        A = LHS0;
      else if (PredL == ICmpInst::ICMP_UGT && LHS0 == RHS0)
        A = LHS->getOperand(1);
    }
    if (A && B)
      return Builder.CreateICmp(
          ICmpInst::ICMP_ULE,
          Builder.CreateAdd(B, ConstantInt::getSigned(B->getType(), -1)), A);
  }
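  // The fold above is sound because B-1 wraps to UINT_MAX when B == 0 (making
  // A u<= B-1 always true), and for B != 0, (A u<= B-1) is exactly (A u< B).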
  // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
  if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/true))
    return V;

  // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n
  if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/true))
    return V;

  if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, false, Builder))
    return V;
  // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
  if (!LHSC || !RHSC)
    return nullptr;
  if (LHSC == RHSC && PredL == PredR) {
    // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
    if (PredL == ICmpInst::ICMP_NE && LHSC->isZero()) {
      Value *NewOr = Builder.CreateOr(LHS0, RHS0);
      return Builder.CreateICmp(PredL, NewOr, LHSC);
    }
  }
  // (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1)
  // iff C2 + CA == C1.
  if (PredL == ICmpInst::ICMP_ULT && PredR == ICmpInst::ICMP_EQ) {
    ConstantInt *AddC;
    if (match(LHS0, m_Add(m_Specific(RHS0), m_ConstantInt(AddC))))
      if (RHSC->getValue() + AddC->getValue() == LHSC->getValue())
        return Builder.CreateICmpULE(LHS0, LHSC);
  }
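  // E.g. (icmp ult (X + 5), 10) | (icmp eq X, 5) --> icmp ule (X + 5), 10;
  // X == 5 is exactly the boundary case where X + 5 equals 10.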
  // From here on, we only handle:
  // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
  if (LHS0 != RHS0)
    return nullptr;
  // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere.
  if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE ||
      PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE ||
      PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE ||
      PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE)
    return nullptr;

  // We can't fold (ugt x, C) | (sgt x, C2).
  if (!PredicatesFoldable(PredL, PredR))
    return nullptr;
  // Ensure that the larger constant is on the RHS.
  bool ShouldSwap;
  if (CmpInst::isSigned(PredL) ||
      (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR)))
    ShouldSwap = LHSC->getValue().sgt(RHSC->getValue());
  else
    ShouldSwap = LHSC->getValue().ugt(RHSC->getValue());

  if (ShouldSwap) {
    std::swap(LHS, RHS);
    std::swap(LHSC, RHSC);
    std::swap(PredL, PredR);
  }
  // At this point, we know we have two icmp instructions
  // comparing a value against two constants and or'ing the result
  // together. Because of the above check, we know that we only have
  // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
  // icmp folding check above) that the two constants are not equal.
  assert(LHSC != RHSC && "Compares not folded above?");
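  // The range folds in the switch below rewrite a pair of compares as a
  // single unsigned compare against an offset range; e.g. (X u< 13 | X u> 15)
  // becomes (X - 13) u> 2, which is false exactly for X in [13, 15].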
  switch (PredL) {
  default:
    llvm_unreachable("Unknown integer condition code!");
  case ICmpInst::ICMP_EQ:
    switch (PredR) {
    default:
      llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:
      // Potential folds for this case should already be handled.
      break;
    case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
    case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_ULT:
    switch (PredR) {
    default:
      llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
      break;
    case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
      assert(!RHSC->isMaxValue(false) && "Missed icmp simplification");
      return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1,
                             false, false);
    }
    break;
  case ICmpInst::ICMP_SLT:
    switch (PredR) {
    default:
      llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
      break;
    case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
      assert(!RHSC->isMaxValue(true) && "Missed icmp simplification");
      return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1, true,
                             false);
    }
    break;
  }
  return nullptr;
}
// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombiner::visitOr(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyOrInst(Op0, Op1, SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;
  // Do this before using distributive laws to catch simple and/or/not patterns.
  if (Instruction *Xor = foldOrToXor(I, Builder))
    return Xor;

  // (A&B)|(A&C) -> A&(B|C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyBSwap(I, Builder))
    return replaceInstUsesWith(I, V);
  if (isa<Constant>(Op1))
    if (Instruction *FoldedLogic = foldOpWithConstantIntoOperand(I))
      return FoldedLogic;

  // Given an OR instruction, check to see if this is a bswap.
  if (Instruction *BSwap = MatchBSwap(I))
    return BSwap;
  {
    Value *A;
    const APInt *C;
    // (X^C)|Y -> (X|Y)^C iff Y&C == 0
    if (match(Op0, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) &&
        MaskedValueIsZero(Op1, *C, 0, &I)) {
      Value *NOr = Builder.CreateOr(A, Op1);
      NOr->takeName(Op0);
      return BinaryOperator::CreateXor(NOr,
                                       ConstantInt::get(NOr->getType(), *C));
    }

    // Y|(X^C) -> (X|Y)^C iff Y&C == 0
    if (match(Op1, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) &&
        MaskedValueIsZero(Op0, *C, 0, &I)) {
      Value *NOr = Builder.CreateOr(A, Op0);
      NOr->takeName(Op0);
      return BinaryOperator::CreateXor(NOr,
                                       ConstantInt::get(NOr->getType(), *C));
    }
  }
  Value *A, *B;

  // (A & C)|(B & D)
  Value *C = nullptr, *D = nullptr;
  if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
      match(Op1, m_And(m_Value(B), m_Value(D)))) {
    ConstantInt *C1 = dyn_cast<ConstantInt>(C);
    ConstantInt *C2 = dyn_cast<ConstantInt>(D);
    if (C1 && C2) { // (A & C1)|(B & C2)
      Value *V1 = nullptr, *V2 = nullptr;
      if ((C1->getValue() & C2->getValue()).isNullValue()) {
        // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
        // iff (C1&C2) == 0 and (N&~C1) == 0
        if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
            ((V1 == B &&
              MaskedValueIsZero(V2, ~C1->getValue(), 0, &I)) || // (V|N)
             (V2 == B &&
              MaskedValueIsZero(V1, ~C1->getValue(), 0, &I))))  // (N|V)
          return BinaryOperator::CreateAnd(
              A, Builder.getInt(C1->getValue() | C2->getValue()));
        // Or commutes, try both ways.
        if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
            ((V1 == A &&
              MaskedValueIsZero(V2, ~C2->getValue(), 0, &I)) || // (V|N)
             (V2 == A &&
              MaskedValueIsZero(V1, ~C2->getValue(), 0, &I))))  // (N|V)
          return BinaryOperator::CreateAnd(
              B, Builder.getInt(C1->getValue() | C2->getValue()));

        // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
        // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
        ConstantInt *C3 = nullptr, *C4 = nullptr;
        if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
            (C3->getValue() & ~C1->getValue()).isNullValue() &&
            match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
            (C4->getValue() & ~C2->getValue()).isNullValue()) {
          V2 = Builder.CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
          return BinaryOperator::CreateAnd(
              V2, Builder.getInt(C1->getValue() | C2->getValue()));
        }
      }

      if (C1->getValue() == ~C2->getValue()) {
        Value *X;

        // ((X|B)&C1)|(B&C2) -> (X&C1) | B iff C1 == ~C2
        if (match(A, m_c_Or(m_Value(X), m_Specific(B))))
          return BinaryOperator::CreateOr(Builder.CreateAnd(X, C1), B);
        // (A&C2)|((X|A)&C1) -> (X&C2) | A iff C1 == ~C2
        if (match(B, m_c_Or(m_Specific(A), m_Value(X))))
          return BinaryOperator::CreateOr(Builder.CreateAnd(X, C2), A);

        // ((X^B)&C1)|(B&C2) -> (X&C1) ^ B iff C1 == ~C2
        if (match(A, m_c_Xor(m_Value(X), m_Specific(B))))
          return BinaryOperator::CreateXor(Builder.CreateAnd(X, C1), B);
        // (A&C2)|((X^A)&C1) -> (X&C2) ^ A iff C1 == ~C2
        if (match(B, m_c_Xor(m_Specific(A), m_Value(X))))
          return BinaryOperator::CreateXor(Builder.CreateAnd(X, C2), A);
      }
    }
    // Don't try to form a select if it's unlikely that we'll get rid of at
    // least one of the operands. A select is generally more expensive than the
    // 'or' that it is replacing.
    if (Op0->hasOneUse() || Op1->hasOneUse()) {
      // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
      if (Value *V = matchSelectFromAndOr(A, C, B, D, Builder))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(A, C, D, B, Builder))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(C, A, B, D, Builder))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(C, A, D, B, Builder))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(B, D, A, C, Builder))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(B, D, C, A, Builder))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(D, B, A, C, Builder))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(D, B, C, A, Builder))
        return replaceInstUsesWith(I, V);
    }
  }
  // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
  if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
    if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
      return BinaryOperator::CreateOr(Op0, C);

  // ((A ^ C) ^ B) | (B ^ A) -> (B ^ A) | C
  if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
    if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
      return BinaryOperator::CreateOr(Op1, C);

  // ((B | C) & A) | B -> B | (A & C)
  if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A))))
    return BinaryOperator::CreateOr(Op1, Builder.CreateAnd(A, C));
  if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
    return DeMorgan;
  // Canonicalize xor to the RHS.
  bool SwappedForXor = false;
  if (match(Op0, m_Xor(m_Value(), m_Value()))) {
    std::swap(Op0, Op1);
    SwappedForXor = true;
  }
  // A | (A ^ B) -> A | B
  // A | (~A ^ B) -> A | ~B
  // (A & B) | (A ^ B)
  if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
    if (Op0 == A || Op0 == B)
      return BinaryOperator::CreateOr(A, B);

    if (match(Op0, m_And(m_Specific(A), m_Specific(B))) ||
        match(Op0, m_And(m_Specific(B), m_Specific(A))))
      return BinaryOperator::CreateOr(A, B);

    if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) {
      Value *Not = Builder.CreateNot(B, B->getName() + ".not");
      return BinaryOperator::CreateOr(Not, Op0);
    }
    if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) {
      Value *Not = Builder.CreateNot(A, A->getName() + ".not");
      return BinaryOperator::CreateOr(Not, Op0);
    }
  }
  // A | ~(A | B) -> A | ~B
  // A | ~(A ^ B) -> A | ~B
  if (match(Op1, m_Not(m_Value(A))))
    if (BinaryOperator *B = dyn_cast<BinaryOperator>(A))
      if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) &&
          Op1->hasOneUse() && (B->getOpcode() == Instruction::Or ||
                               B->getOpcode() == Instruction::Xor)) {
        Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1)
                                               : B->getOperand(0);
        Value *Not = Builder.CreateNot(NotOp, NotOp->getName() + ".not");
        return BinaryOperator::CreateOr(Not, Op0);
      }

  if (SwappedForXor)
    std::swap(Op0, Op1);
  {
    ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
    ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
    if (LHS && RHS)
      if (Value *Res = foldOrOfICmps(LHS, RHS, I))
        return replaceInstUsesWith(I, Res);

    // TODO: Make this recursive; it's a little tricky because an arbitrary
    // number of 'or' instructions might have to be created.
    Value *X, *Y;
    if (LHS && match(Op1, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
      if (auto *Cmp = dyn_cast<ICmpInst>(X))
        if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
          return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
      if (auto *Cmp = dyn_cast<ICmpInst>(Y))
        if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
          return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
    }
    if (RHS && match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
      if (auto *Cmp = dyn_cast<ICmpInst>(X))
        if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
          return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
      if (auto *Cmp = dyn_cast<ICmpInst>(Y))
        if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
          return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
    }
  }
  if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
    if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
      if (Value *Res = foldLogicOfFCmps(LHS, RHS, false))
        return replaceInstUsesWith(I, Res);

  if (Instruction *CastedOr = foldCastedBitwiseLogic(I))
    return CastedOr;
  // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>.
  if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) &&
      A->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op1);
  if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) &&
      A->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op0);
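  // These work because sext of i1 true is all-ones, which absorbs 'or', and
  // sext of i1 false is zero, which is the identity for 'or'.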
  // Note: If we've gotten to the point of visiting the outer OR, then the
  // inner one couldn't be simplified. If it was a constant, then it won't
  // be simplified by a later pass either, so we try swapping the inner/outer
  // ORs in the hopes that we'll be able to simplify it this way.
  // (X|C) | V --> (X|V) | C
  ConstantInt *C1;
  if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) &&
      match(Op0, m_Or(m_Value(A), m_ConstantInt(C1)))) {
    Value *Inner = Builder.CreateOr(A, Op1);
    Inner->takeName(Op0);
    return BinaryOperator::CreateOr(Inner, C1);
  }
  // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D))
  // Since this OR statement hasn't been optimized further yet, we hope
  // that this transformation will allow the new ORs to be optimized.
  {
    Value *X = nullptr, *Y = nullptr;
    if (Op0->hasOneUse() && Op1->hasOneUse() &&
        match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
        match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
      Value *orTrue = Builder.CreateOr(A, C);
      Value *orFalse = Builder.CreateOr(B, D);
      return SelectInst::Create(X, orTrue, orFalse);
    }
  }

  return Changed ? &I : nullptr;
}
/// A ^ B can be specified using other logic ops in a variety of patterns. We
/// can fold these early and efficiently by morphing an existing instruction.
static Instruction *foldXorToXor(BinaryOperator &I,
                                 InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::Xor);
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *A, *B;

  // There are 4 commuted variants for each of the basic patterns.

  // (A & B) ^ (A | B) -> A ^ B
  // (A & B) ^ (B | A) -> A ^ B
  // (A | B) ^ (A & B) -> A ^ B
  // (A | B) ^ (B & A) -> A ^ B
  if ((match(Op0, m_And(m_Value(A), m_Value(B))) &&
       match(Op1, m_c_Or(m_Specific(A), m_Specific(B)))) ||
      (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
       match(Op1, m_c_And(m_Specific(A), m_Specific(B))))) {
    I.setOperand(0, A);
    I.setOperand(1, B);
    return &I;
  }
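  // Bitwise justification: for each bit position, if the bits of A and B are
  // equal then (A & B) and (A | B) agree and the xor bit is 0; if they
  // differ, (A & B) is 0 and (A | B) is 1, so the xor bit is 1. Either way
  // the result matches A ^ B.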
  // (A | ~B) ^ (~A | B) -> A ^ B
  // (~B | A) ^ (~A | B) -> A ^ B
  // (~A | B) ^ (A | ~B) -> A ^ B
  // (B | ~A) ^ (A | ~B) -> A ^ B
  if ((match(Op0, m_Or(m_Value(A), m_Not(m_Value(B)))) &&
       match(Op1, m_c_Or(m_Not(m_Specific(A)), m_Specific(B)))) ||
      (match(Op0, m_Or(m_Not(m_Value(A)), m_Value(B))) &&
       match(Op1, m_c_Or(m_Specific(A), m_Not(m_Specific(B)))))) {
    I.setOperand(0, A);
    I.setOperand(1, B);
    return &I;
  }
  // (A & ~B) ^ (~A & B) -> A ^ B
  // (~B & A) ^ (~A & B) -> A ^ B
  // (~A & B) ^ (A & ~B) -> A ^ B
  // (B & ~A) ^ (A & ~B) -> A ^ B
  if ((match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
       match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))) ||
      (match(Op0, m_And(m_Not(m_Value(A)), m_Value(B))) &&
       match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))))) {
    I.setOperand(0, A);
    I.setOperand(1, B);
    return &I;
  }
  // For the remaining cases we need to get rid of one of the operands.
  if (!Op0->hasOneUse() && !Op1->hasOneUse())
    return nullptr;

  // (A | B) ^ ~(A & B) -> ~(A ^ B)
  // (A | B) ^ ~(B & A) -> ~(A ^ B)
  // (A & B) ^ ~(A | B) -> ~(A ^ B)
  // (A & B) ^ ~(B | A) -> ~(A ^ B)
  // Complexity sorting ensures the not will be on the right side.
  if ((match(Op0, m_Or(m_Value(A), m_Value(B))) &&
       match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B))))) ||
      (match(Op0, m_And(m_Value(A), m_Value(B))) &&
       match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))))
    return BinaryOperator::CreateNot(Builder.CreateXor(A, B));

  return nullptr;
}
Value *InstCombiner::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
  if (PredicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) {
    if (LHS->getOperand(0) == RHS->getOperand(1) &&
        LHS->getOperand(1) == RHS->getOperand(0))
      LHS->swapOperands();
    if (LHS->getOperand(0) == RHS->getOperand(0) &&
        LHS->getOperand(1) == RHS->getOperand(1)) {
      // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
      Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
      unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
      bool isSigned = LHS->isSigned() || RHS->isSigned();
      return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
    }
  }
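  // E.g. (icmp ule A, B) ^ (icmp ult A, B) --> icmp eq A, B: xor'ing the
  // predicate bit codes leaves only the 'equal' bit set.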
  // Instead of trying to imitate the folds for and/or, decompose this 'xor'
  // into those logic ops. That is, try to turn this into an and-of-icmps
  // because we have many folds for that pattern.
  //
  // This is based on a truth table definition of xor:
  // X ^ Y --> (X | Y) & !(X & Y)
  if (Value *OrICmp = SimplifyBinOp(Instruction::Or, LHS, RHS, SQ)) {
    // TODO: If OrICmp is true, then the definition of xor simplifies to
    // !(X & Y).
    // TODO: If OrICmp is false, the whole thing is false (InstSimplify?).
    if (Value *AndICmp = SimplifyBinOp(Instruction::And, LHS, RHS, SQ)) {
      // TODO: Independently handle cases where the 'and' side is a constant.
      if (OrICmp == LHS && AndICmp == RHS && RHS->hasOneUse()) {
        // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS
        RHS->setPredicate(RHS->getInversePredicate());
        return Builder.CreateAnd(LHS, RHS);
      }
      if (OrICmp == RHS && AndICmp == LHS && LHS->hasOneUse()) {
        // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS
        LHS->setPredicate(LHS->getInversePredicate());
        return Builder.CreateAnd(LHS, RHS);
      }
    }
  }

  return nullptr;
}
// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombiner::visitXor(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyXorInst(Op0, Op1, SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *NewXor = foldXorToXor(I, Builder))
    return NewXor;

  // (A&B)^(A&C) -> A&(B^C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  if (Value *V = SimplifyBSwap(I, Builder))
    return replaceInstUsesWith(I, V);
  // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand.
  Value *X, *Y;

  // We must eliminate the and/or (one-use) for these transforms to not
  // increase the instruction count.
  // ~(~X & Y) --> (X | ~Y)
  // ~(Y & ~X) --> (X | ~Y)
  if (match(&I, m_Not(m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y)))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return BinaryOperator::CreateOr(X, NotY);
  }

  // ~(~X | Y) --> (X & ~Y)
  // ~(Y | ~X) --> (X & ~Y)
  if (match(&I, m_Not(m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y)))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return BinaryOperator::CreateAnd(X, NotY);
  }
  // Is this a 'not' (~) fed by a binary operator?
  BinaryOperator *NotVal;
  if (match(&I, m_Not(m_BinOp(NotVal)))) {
    if (NotVal->getOpcode() == Instruction::And ||
        NotVal->getOpcode() == Instruction::Or) {
      // Apply DeMorgan's Law when inverts are free:
      // ~(X & Y) --> (~X | ~Y)
      // ~(X | Y) --> (~X & ~Y)
      if (IsFreeToInvert(NotVal->getOperand(0),
                         NotVal->getOperand(0)->hasOneUse()) &&
          IsFreeToInvert(NotVal->getOperand(1),
                         NotVal->getOperand(1)->hasOneUse())) {
        Value *NotX = Builder.CreateNot(NotVal->getOperand(0), "notlhs");
        Value *NotY = Builder.CreateNot(NotVal->getOperand(1), "notrhs");
        if (NotVal->getOpcode() == Instruction::And)
          return BinaryOperator::CreateOr(NotX, NotY);
        return BinaryOperator::CreateAnd(NotX, NotY);
      }
    }
    // ~(~X >>s Y) --> (X >>s Y)
    if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y))))
      return BinaryOperator::CreateAShr(X, Y);

    // If we are inverting a right-shifted constant, we may be able to
    // eliminate the 'not' by inverting the constant and using the opposite
    // shift type. Canonicalization rules ensure that only a negative constant
    // uses 'ashr', but we must check that in case that transform has not
    // fired yet.
    const APInt *C;
    if (match(NotVal, m_AShr(m_APInt(C), m_Value(Y))) && C->isNegative()) {
      // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits)
      Constant *NotC = ConstantInt::get(I.getType(), ~(*C));
      return BinaryOperator::CreateLShr(NotC, Y);
    }

    if (match(NotVal, m_LShr(m_APInt(C), m_Value(Y))) && C->isNonNegative()) {
      // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits)
      Constant *NotC = ConstantInt::get(I.getType(), ~(*C));
      return BinaryOperator::CreateAShr(NotC, Y);
    }
  }
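  // Worked instance of the shift-of-constant inversions above, with
  // hypothetical i8 values C = -2 (0b11111110) and Y = 1:
  //   ~(C >>s Y) = ~0b11111111 = 0, and ~C >>u Y = 0b00000001 >>u 1 = 0.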
  // not (cmp A, B) = !cmp A, B
  CmpInst::Predicate Pred;
  if (match(&I, m_Not(m_OneUse(m_Cmp(Pred, m_Value(), m_Value()))))) {
    cast<CmpInst>(Op0)->setPredicate(CmpInst::getInversePredicate(Pred));
    return replaceInstUsesWith(I, Op0);
  }
  {
    const APInt *RHSC;
    if (match(Op1, m_APInt(RHSC))) {
      Value *X;
      const APInt *C;
      if (match(Op0, m_Sub(m_APInt(C), m_Value(X)))) {
        // ~(c-X) == X-c-1 == X+(-c-1)
        if (RHSC->isAllOnesValue()) {
          Constant *NewC = ConstantInt::get(I.getType(), -(*C) - 1);
          return BinaryOperator::CreateAdd(X, NewC);
        }
        if (RHSC->isSignMask()) {
          // (C - X) ^ signmask -> (C + signmask - X)
          Constant *NewC = ConstantInt::get(I.getType(), *C + *RHSC);
          return BinaryOperator::CreateSub(NewC, X);
        }
      } else if (match(Op0, m_Add(m_Value(X), m_APInt(C)))) {
        // ~(X+c) --> (-c-1)-X
        if (RHSC->isAllOnesValue()) {
          Constant *NewC = ConstantInt::get(I.getType(), -(*C) - 1);
          return BinaryOperator::CreateSub(NewC, X);
        }
        if (RHSC->isSignMask()) {
          // (X + C) ^ signmask -> (X + C + signmask)
          Constant *NewC = ConstantInt::get(I.getType(), *C + *RHSC);
          return BinaryOperator::CreateAdd(X, NewC);
        }
      }

      // (X|C1)^C2 -> X^(C1^C2) iff X&~C1 == 0
      if (match(Op0, m_Or(m_Value(X), m_APInt(C))) &&
          MaskedValueIsZero(X, *C, 0, &I)) {
        Constant *NewC = ConstantInt::get(I.getType(), *C ^ *RHSC);
        Worklist.Add(cast<Instruction>(Op0));
        I.setOperand(0, X);
        I.setOperand(1, NewC);
        return &I;
      }
    }
  }
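  // E.g. for i8: (42 - X) ^ -1 --> X + (-43), because ~(42 - X) is
  // -(42 - X) - 1 == X - 43 in two's complement.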
  if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1)) {
    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
      if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
        if (Op0I->getOpcode() == Instruction::LShr) {
          // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
          // E1 = "X ^ C1"
          BinaryOperator *E1;
          ConstantInt *C1;
          if (Op0I->hasOneUse() &&
              (E1 = dyn_cast<BinaryOperator>(Op0I->getOperand(0))) &&
              E1->getOpcode() == Instruction::Xor &&
              (C1 = dyn_cast<ConstantInt>(E1->getOperand(1)))) {
            // fold (C1 >> C2) ^ C3
            ConstantInt *C2 = Op0CI, *C3 = RHSC;
            APInt FoldConst = C1->getValue().lshr(C2->getValue());
            FoldConst ^= C3->getValue();
            // Prepare the two operands.
            Value *Opnd0 = Builder.CreateLShr(E1->getOperand(0), C2);
            Opnd0->takeName(Op0I);
            cast<Instruction>(Opnd0)->setDebugLoc(I.getDebugLoc());
            Value *FoldVal = ConstantInt::get(Opnd0->getType(), FoldConst);

            return BinaryOperator::CreateXor(Opnd0, FoldVal);
          }
        }
      }
    }
  }
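  // This is valid because lshr distributes over xor:
  // (X ^ C1) >> C2 == (X >> C2) ^ (C1 >> C2), so the two constants combine.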
  if (isa<Constant>(Op1))
    if (Instruction *FoldedLogic = foldOpWithConstantIntoOperand(I))
      return FoldedLogic;
  {
    Value *A, *B;
    if (match(Op1, m_OneUse(m_Or(m_Value(A), m_Value(B))))) {
      if (A == Op0) { // A^(A|B) == A^(B|A)
        cast<BinaryOperator>(Op1)->swapOperands();
        std::swap(A, B);
      }
      if (B == Op0) { // A^(B|A) == (B|A)^A
        I.swapOperands(); // Simplified below.
        std::swap(Op0, Op1);
      }
    } else if (match(Op1, m_OneUse(m_And(m_Value(A), m_Value(B))))) {
      if (A == Op0) { // A^(A&B) -> A^(B&A)
        cast<BinaryOperator>(Op1)->swapOperands();
        std::swap(A, B);
      }
      if (B == Op0) { // A^(B&A) -> (B&A)^A
        I.swapOperands(); // Simplified below.
        std::swap(Op0, Op1);
      }
    }
  }

  {
    Value *A, *B;
    if (match(Op0, m_OneUse(m_Or(m_Value(A), m_Value(B))))) {
      if (A == Op1) // (B|A)^B == (A|B)^B
        std::swap(A, B);
      if (B == Op1) // (A|B)^B == A & ~B
        return BinaryOperator::CreateAnd(A, Builder.CreateNot(Op1));
    } else if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B))))) {
      if (A == Op1) // (A&B)^A -> (B&A)^A
        std::swap(A, B);
      const APInt *C;
      if (B == Op1 &&                // (B&A)^A == ~B & A
          !match(Op1, m_APInt(C))) { // Canonical form is (B&C)^C
        return BinaryOperator::CreateAnd(Builder.CreateNot(A), Op1);
      }
    }
  }
  {
    Value *A, *B, *C, *D;
    // (A ^ C)^(A | B) -> ((~A) & B) ^ C
    if (match(Op0, m_Xor(m_Value(D), m_Value(C))) &&
        match(Op1, m_Or(m_Value(A), m_Value(B)))) {
      if (D == A)
        return BinaryOperator::CreateXor(
            Builder.CreateAnd(Builder.CreateNot(A), B), C);
      if (D == B)
        return BinaryOperator::CreateXor(
            Builder.CreateAnd(Builder.CreateNot(B), A), C);
    }
    // (A | B)^(A ^ C) -> ((~A) & B) ^ C
    if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
        match(Op1, m_Xor(m_Value(D), m_Value(C)))) {
      if (D == A)
        return BinaryOperator::CreateXor(
            Builder.CreateAnd(Builder.CreateNot(A), B), C);
      if (D == B)
        return BinaryOperator::CreateXor(
            Builder.CreateAnd(Builder.CreateNot(B), A), C);
    }
    // (A & B) ^ (A ^ B) -> (A | B)
    if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
        match(Op1, m_c_Xor(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateOr(A, B);
    // (A ^ B) ^ (A & B) -> (A | B)
    if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateOr(A, B);
  }
  // (A & ~B) ^ ~A -> ~(A & B)
  // (~B & A) ^ ~A -> ~(A & B)
  Value *A, *B;
  if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
      match(Op1, m_Not(m_Specific(A))))
    return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));
  if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
    if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
      if (Value *V = foldXorOfICmps(LHS, RHS))
        return replaceInstUsesWith(I, V);

  if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
    return CastedXor;
  // Canonicalize the shifty way to code absolute value to the common pattern.
  // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1.
  // We're relying on the fact that we only do this transform when the shift
  // has exactly 2 uses and the add has exactly 1 use (otherwise, we might
  // increase the instruction count).
  if (Op0->getNumUses() == 2)
    std::swap(Op0, Op1);

  const APInt *ShAmt;
  Type *Ty = I.getType();
  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
      Op1->getNumUses() == 2 && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
      match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) {
    // B = ashr i32 A, 31  ; smear the sign bit
    // xor (add A, B), B   ; add -1 and flip bits if negative
    // --> (A < 0) ? -A : A
    Value *Cmp = Builder.CreateICmpSLT(A, ConstantInt::getNullValue(Ty));
    return SelectInst::Create(Cmp, Builder.CreateNeg(A), A);
  }
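  // The select above is correct because when A is negative, B is all-ones,
  // so (A + B) ^ B == ~(A - 1) == -A; when A is non-negative, B is zero and
  // the expression is just A.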
  return Changed ? &I : nullptr;
}