//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;
// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

// This optimization is known to cause performance regressions in some cases,
// keep it under a temporary flag for now.
static cl::opt<bool>
DontImproveNonNegativePhiBits("dont-improve-non-negative-phi-bits",
                              cl::Hidden, cl::init(true));
/// Returns the bitwidth of the given scalar or pointer type (if unknown,
/// returns 0). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}
namespace {
// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
  /// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so
  /// on.
  std::array<const Value *, MaxDepth> Excluded;
  unsigned NumExcluded;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), NumExcluded(0) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};
} // end anonymous namespace
// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}
static void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT) {
  ::computeKnownBits(V, KnownZero, KnownOne, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT));
}
bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
  APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
  return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
}
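
// For example, with i8 operands, if LHS is (x << 4) (low four bits known
// zero) and RHS is (y & 15) (high four bits known zero), the two known-zero
// masks together cover all eight bits, so LHS + RHS == LHS | RHS.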

static void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
                           unsigned Depth, const Query &Q);

void llvm::ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
                          const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  ::ComputeSignBit(V, KnownZero, KnownOne, Depth,
                   Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  bool NonNegative, Negative;
  ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
  return NonNegative;
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  bool NonNegative, Negative;
  ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
  return Negative;
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
                                         DT));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   APInt &KnownZero, APInt &KnownOne,
                                   APInt &KnownZero2, APInt &KnownOne2,
                                   unsigned Depth, const Query &Q) {
  if (!Add) {
    if (const ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen). For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (!CLHS->getValue().isNegative()) {
        unsigned BitWidth = KnownZero.getBitWidth();
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth because the value is non-negative.
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // non-negative.
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }

  unsigned BitWidth = KnownZero.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, Depth + 1, Q);
  computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);

  // Carry in a 1 for a subtract, rather than a 0.
  APInt CarryIn(BitWidth, 0);
  if (!Add) {
    // Sum = LHS + ~RHS + 1
    std::swap(KnownZero2, KnownOne2);
    CarryIn.setBit(0);
  }

  APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
  APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;

  // Compute known bits of the carry.
  APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
  APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;

  // Compute set of known bits (where all three relevant bits are known).
  APInt LHSKnown = LHSKnownZero | LHSKnownOne;
  APInt RHSKnown = KnownZero2 | KnownOne2;
  APInt CarryKnown = CarryKnownZero | CarryKnownOne;
  APInt Known = LHSKnown & RHSKnown & CarryKnown;

  assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
         "known bits of sum differ");

  // Compute known bits of the result.
  KnownZero = ~PossibleSumOne & Known;
  KnownOne = PossibleSumOne & Known;

  // Are we still trying to solve for the sign bit?
  if (!Known.isNegative()) {
    if (NSW) {
      // Adding two non-negative numbers, or subtracting a negative number from
      // a non-negative one, can't wrap into negative.
      if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
        KnownZero |= APInt::getSignBit(BitWidth);
      // Adding two negative numbers, or subtracting a non-negative number from
      // a negative one, can't wrap into non-negative.
      else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
        KnownOne |= APInt::getSignBit(BitWidth);
    }
  }
}
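
// For example, if the low two bits of LHS are known to be 0b10 and the low
// two bits of RHS are known to be 0b01, the low two bits of the sum are
// known to be 0b11 and the carry into bit 2 is known to be zero, whatever
// the remaining (unknown) bits are.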

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                APInt &KnownZero, APInt &KnownOne,
                                APInt &KnownZero2, APInt &KnownOne2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();
  computeKnownBits(Op1, KnownZero, KnownOne, Depth + 1, Q);
  computeKnownBits(Op0, KnownZero2, KnownOne2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = KnownZero.isNegative();
      bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
      bool isKnownNegativeOp1 = KnownOne.isNegative();
      bool isKnownNegativeOp0 = KnownOne2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
  KnownOne.clearAllBits();
  unsigned TrailZ = KnownZero.countTrailingOnes() +
                    KnownZero2.countTrailingOnes();
  unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                            KnownZero2.countLeadingOnes(),
                            BitWidth) - BitWidth;

  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);
  KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
              APInt::getHighBitsSet(BitWidth, LeadZ);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !KnownOne.isNegative())
    KnownZero.setBit(BitWidth - 1);
  else if (isKnownNegative && !KnownZero.isNegative())
    KnownOne.setBit(BitWidth - 1);
}
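
// For example, if Op0 is known to have at least two trailing zero bits and
// Op1 at least three, the product has at least five trailing zero bits;
// this is the property that makes alignment computations work.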

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             APInt &KnownZero,
                                             APInt &KnownOne) {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  KnownZero.setAllBits();
  KnownOne.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    KnownOne &= Range.getUnsignedMax() & Mask;
    KnownZero &= ~Range.getUnsignedMax() & Mask;
  }
}
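
// For example, an i8 !range of [32, 48) has getUnsignedMax() == 47; 47 ^ 32
// is 0b00001111, which has four leading zeros, so the top four bits of every
// value in the range are known to be 0010.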

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (all_of(V->users(), [&](const User *U) { return EphValues.count(U); })) {
      if (V == E)
        return true;

      EphValues.insert(V);
      if (const User *U = dyn_cast<User>(V))
        for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
             J != JE; ++J)
          if (isSafeToSpeculativelyExecute(*J))
            WorkSet.push_back(*J);
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
static bool isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
         std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}
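
// For example, in the block
//   %cmp = icmp ult i32 %x, 16
//   call void @llvm.assume(i1 %cmp)
//   %r = lshr i32 %x, 4
// the assume is valid for the context %r: it comes first in the same block,
// and %r is not ephemeral to it.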

static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
                                       APInt &KnownOne, unsigned Depth,
                                       const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = KnownZero.getBitWidth();

  for (auto &AssumeVH : Q.AC->assumptions()) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      KnownZero.clearAllBits();
      KnownOne.setAllBits();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                           m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    ConstantInt *C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      KnownZero |= RHSKnownZero;
      KnownOne |= RHSKnownOne;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      KnownZero |= RHSKnownZero & MaskKnownOne;
      KnownOne |= RHSKnownOne & MaskKnownOne;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne & MaskKnownOne;
      KnownOne |= RHSKnownZero & MaskKnownOne;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne |= RHSKnownOne & BKnownZero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne & BKnownZero;
      KnownOne |= RHSKnownZero & BKnownZero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne |= RHSKnownOne & BKnownZero;
      KnownZero |= RHSKnownOne & BKnownOne;
      KnownOne |= RHSKnownZero & BKnownOne;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      KnownZero |= RHSKnownOne & BKnownZero;
      KnownOne |= RHSKnownZero & BKnownZero;
      KnownZero |= RHSKnownZero & BKnownOne;
      KnownOne |= RHSKnownOne & BKnownOne;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
      KnownOne |= RHSKnownOne.lshr(C->getZExtValue());
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
      KnownOne |= RHSKnownZero.lshr(C->getZExtValue());
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
                                                m_AShr(m_V, m_ConstantInt(C))),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      KnownZero |= RHSKnownZero << C->getZExtValue();
      KnownOne |= RHSKnownOne << C->getZExtValue();
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
                                             m_LShr(m_V, m_ConstantInt(C)),
                                             m_AShr(m_V, m_ConstantInt(C)))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
      KnownZero |= RHSKnownOne << C->getZExtValue();
      KnownOne |= RHSKnownZero << C->getZExtValue();
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne |= APInt::getSignBit(BitWidth);
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne |= APInt::getSignBit(BitWidth);
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      KnownZero |=
        APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        KnownZero |=
          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1);
      else
        KnownZero |=
          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
    }
  }
}
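
// For example, assume((x & 0xF0) == 0x30) matches the assume(v & b = a)
// pattern above: the mask's known-one bits 0xF0 let the RHS bits propagate,
// so bits 7..4 of x become known to be 0011.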

// Compute known bits from a shift operator, including those with a
// non-constant shift amount. KnownZero and KnownOne are the outputs of this
// function. KnownZero2 and KnownOne2 are pre-allocated temporaries with the
// same bit width as KnownZero and KnownOne. KZF and KOF are operator-specific
// functors that, given the known-zero or known-one bits respectively, and a
// shift amount, compute the implied known-zero or known-one bits of the shift
// operator's result respectively for that shift amount. The results from
// calling KZF and KOF are conservatively combined for all permitted shift
// amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, APInt &KnownZero, APInt &KnownOne, APInt &KnownZero2,
    APInt &KnownOne2, unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = KnownZero.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    KnownZero = KZF(KnownZero, ShiftAmt);
    KnownOne = KOF(KnownOne, ShiftAmt);
    // If there is conflict between KnownZero and KnownOne, this must be an
    // overflowing left shift, so the shift result is undefined. Clear KnownZero
    // and KnownOne bits so that other code could propagate this undef.
    if ((KnownZero & KnownOne) != 0) {
      KnownZero.clearAllBits();
      KnownOne.clearAllBits();
    }
    return;
  }

  computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);

  // Note: We cannot use KnownZero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = KnownZero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = KnownOne.zextOrTrunc(64).getZExtValue();

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  KnownZero.clearAllBits();
  KnownOne.clearAllBits();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

  KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    KnownZero &= KZF(KnownZero2, ShiftAmt);
    KnownOne &= KOF(KnownOne2, ShiftAmt);
  }

  // If there are no compatible shift amounts, then we've proven that the shift
  // amount must be >= the BitWidth, and the result is undefined. We could
  // return anything we'd like, but we need to make sure the sets of known bits
  // stay disjoint (it should be better for some other code to actually
  // propagate the undef than to pick a value here using known bits).
  if ((KnownZero & KnownOne) != 0) {
    KnownZero.clearAllBits();
    KnownOne.clearAllBits();
  }
}
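
// For example, for an i4 shl whose first operand is known to be 0001 and
// whose shift amount has its low bit known zero (so only the amounts 0 and 2
// are compatible), the possible results are 0001 and 0100; combining them
// leaves bits 1 and 3 known zero and nothing known one.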

static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
                                         APInt &KnownOne, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *Y = nullptr;
    if (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
                                      m_Value(Y))) ||
        match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
                                      m_Value(Y)))) {
      APInt KnownZero3(BitWidth, 0), KnownOne3(BitWidth, 0);
      computeKnownBits(Y, KnownZero3, KnownOne3, Depth + 1, Q);
      if (KnownOne3.countTrailingOnes() > 0)
        KnownZero |= APInt::getLowBitsSet(BitWidth, 1);
    }
    break;
  }
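  // For example, for (x & 15) the constant operand contributes known-zero
  // high bits, so the result's high bits are known zero even though nothing
  // is known about x itself.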
  case Instruction::Or: {
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  }
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
                        KnownOne, KnownZero2, KnownOne2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    break;
  }
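  // For example, if an i8 numerator has four leading zero bits (value <= 15)
  // and the denominator has bit 1 known one (value >= 2), then
  // LeadZ = min(8, 4 + 8 - 6 - 1) = 5, i.e. the quotient is at most 7.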
  case Instruction::Select: {
    computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);

    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, KnownZero, KnownOne, Depth + 1, Q);
      computeKnownBits(LHS, KnownZero2, KnownOne2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (KnownOne[BitWidth - 1] && KnownOne2[BitWidth - 1])
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
          std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (KnownZero[BitWidth - 1] || KnownZero2[BitWidth - 1])
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (KnownZero[BitWidth - 1] && KnownZero2[BitWidth - 1])
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
      // If either side is negative, the result is negative.
      else if (KnownOne[BitWidth - 1] || KnownOne2[BitWidth - 1])
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
        std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
        std::max(KnownZero.countLeadingOnes(), KnownZero2.countLeadingOnes());
    }

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    if (MaxHighOnes > 0)
      KnownOne |= APInt::getHighBitsSet(BitWidth, MaxHighOnes);
    if (MaxHighZeros > 0)
      KnownZero |= APInt::getHighBitsSet(BitWidth, MaxHighZeros);
    break;
  }
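  // For example, for smax(a, b) where both a and b are known to have at
  // least two leading one bits (so both are negative), the result is also
  // known to have at least two leading one bits.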
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    KnownZero = KnownZero.zextOrTrunc(BitWidth);
    KnownOne = KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero[SrcBitWidth-1]) // Input sign bit known zero
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    else if (KnownOne[SrcBitWidth-1]) // Input sign bit known set
      KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    break;
  }
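  // For example, sign-extending an i8 whose sign bit is known zero to i32
  // makes the top 24 bits of the result known zero; a known-one sign bit
  // makes them known one.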
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    auto KZF = [BitWidth, NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult =
          (KnownZero << ShiftAmt) |
          APInt::getLowBitsSet(BitWidth, ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isNegative())
        KZResult.setBit(BitWidth - 1);
      return KZResult;
    };

    auto KOF = [BitWidth, NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isNegative())
        KOResult.setBit(BitWidth - 1);
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, Depth, Q, KZF,
                                      KOF);
    break;
  }
  case Instruction::LShr: {
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
      return APIntOps::lshr(KnownZero, ShiftAmt) |
             // High bits known zero.
             APInt::getHighBitsSet(BitWidth, ShiftAmt);
    };

    auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
      return APIntOps::lshr(KnownOne, ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, Depth, Q, KZF,
                                      KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return APIntOps::ashr(KnownZero, ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return APIntOps::ashr(KnownOne, ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, Depth, Q, KZF,
                                      KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
                           Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
                           Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1,
                         Q);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;

        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (KnownZero.isNonNegative()) {
      APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
      computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
                       Q);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnownZero.isNegative())
        KnownZero.setBit(BitWidth - 1);
    }

    break;
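  // For example, for (x srem 8) the low three bits of the result equal the
  // low three bits of x, and if x is known non-negative the remaining bits
  // are known zero.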
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
        KnownZero |= ~LowBits;
        KnownOne &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);

    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    break;
  }

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, Depth + 1,
                     Q);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnownZero, LocalKnownOne, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnownZero.countTrailingOnes()));
      }
    }

    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
    break;
  }
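  // For example, indexing into an array of i32 from a 16-byte-aligned base
  // with an unknown index leaves min(4, ctz(4) + 0) == 2 trailing zero bits
  // known, i.e. the result is known to be at least 4-byte aligned.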
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, KnownZero2, KnownOne2, Depth + 1, Q);

          // We need to take the minimum number of known bits
          APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
          computeKnownBits(L, KnownZero3, KnownOne3, Depth + 1, Q);

          KnownZero = APInt::getLowBitsSet(
              BitWidth, std::min(KnownZero2.countTrailingOnes(),
                                 KnownZero3.countTrailingOnes()));

          if (DontImproveNonNegativePhiBits)
            break;

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (KnownZero2.isNegative() && KnownZero3.isNegative())
                KnownZero.setBit(BitWidth - 1);
              else if (KnownOne2.isNegative() && KnownOne3.isNegative())
                KnownOne.setBit(BitWidth - 1);
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (KnownZero2.isNegative() && KnownOne3.isNegative())
                KnownZero.setBit(BitWidth - 1);
              else if (KnownOne2.isNegative() && KnownZero3.isNegative())
                KnownOne.setBit(BitWidth - 1);
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && KnownZero2.isNegative() &&
                     KnownZero3.isNegative())
              KnownZero.setBit(BitWidth - 1);
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      // Skip if every incoming value refers to ourself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getAllOnesValue(BitWidth);
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        KnownZero2 = APInt(BitWidth, 0);
        KnownOne2 = APInt(BitWidth, 0);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, KnownZero2, KnownOne2, MaxDepth - 1, Q);
        KnownZero &= KnownZero2;
        KnownOne &= KnownOne2;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!KnownZero && !KnownOne)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, KnownZero2, KnownOne2, Depth + 1, Q);
      KnownZero |= KnownZero2;
      KnownOne |= KnownOne2;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
        KnownZero |= KnownZero2.byteSwap();
        KnownOne |= KnownOne2.byteSwap();
        break;
      case Intrinsic::ctlz:
      case Intrinsic::cttz: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          LowBits -= 1;
        KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
        unsigned LeadingZeros =
            APInt(BitWidth, BitsPossiblySet).countLeadingZeros();
        assert(LeadingZeros <= BitWidth);
        KnownZero |= APInt::getHighBitsSet(BitWidth, LeadingZeros);
        KnownOne &= ~KnownZero;
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        KnownZero |= APInt::getHighBitsSet(64, 32);
        break;
      }
    }
    break;
  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if vector is sign
    // extended, shifted, etc).
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, KnownZero,
                                 KnownOne, KnownZero2, KnownOne2, Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, KnownZero,
                                 KnownOne, KnownZero2, KnownOne2, Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
                              Q);
          break;
        }
      }
    }
  }
}

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// optimize based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and a
/// bit is set only if it is true for all of the elements in the vector.
void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
                      unsigned Depth, const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = KnownZero.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarType()->isPointerTy()) &&
         "Not integer or pointer type!");
  assert((Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
         (!V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "V, KnownOne and KnownZero should have same BitWidth");

  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector constant!
    KnownOne = *C;
    KnownZero = ~KnownOne;
    return;
  }
1510 // Null and aggregate-zero are all-zeros.
1511 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1512 KnownOne.clearAllBits();
1513 KnownZero = APInt::getAllOnesValue(BitWidth);
1516 // Handle a constant vector by taking the intersection of the known bits of each element.
1518 if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1519 // We know that CDS must be a vector of integers. Take the intersection of the bits from each element.
1521 KnownZero.setAllBits(); KnownOne.setAllBits();
1522 APInt Elt(KnownZero.getBitWidth(), 0);
1523 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1524 Elt = CDS->getElementAsInteger(i);
1525 KnownZero &= ~Elt;
1526 KnownOne &= Elt;
1531 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1532 // We know that CV must be a vector of integers. Take the intersection of the bits from each element.
1534 KnownZero.setAllBits(); KnownOne.setAllBits();
1535 APInt Elt(KnownZero.getBitWidth(), 0);
1536 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1537 Constant *Element = CV->getAggregateElement(i);
1538 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1539 if (!ElementCI) {
1540 KnownZero.clearAllBits();
1541 KnownOne.clearAllBits();
1542 return;
1543 }
1544 Elt = ElementCI->getValue();
1545 KnownZero &= ~Elt;
1546 KnownOne &= Elt;
1551 // Start out not knowing anything.
1552 KnownZero.clearAllBits(); KnownOne.clearAllBits();
1554 // We can't imply anything about undefs.
1555 if (isa<UndefValue>(V))
1556 return;
1558 // There's no point in looking through other users of ConstantData for
1559 // assumptions. Confirm that we've handled them all.
1560 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1562 // Limit search depth.
1563 // All recursive calls that increase depth must come after this.
1564 if (Depth == MaxDepth)
1565 return;
1567 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1568 // the bits of its aliasee.
1569 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1570 if (!GA->isInterposable())
1571 computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, Depth + 1, Q);
1575 if (const Operator *I = dyn_cast<Operator>(V))
1576 computeKnownBitsFromOperator(I, KnownZero, KnownOne, Depth, Q);
1578 // Aligned pointers have trailing zeros - refine KnownZero set
1579 if (V->getType()->isPointerTy()) {
1580 unsigned Align = V->getPointerAlignment(Q.DL);
1581 if (Align)
1582 KnownZero |= APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1585 // computeKnownBitsFromAssume strictly refines KnownZero and
1586 // KnownOne. Therefore, we run it after computeKnownBitsFromOperator.
1588 // Check whether a nearby assume intrinsic can determine some known bits.
1589 computeKnownBitsFromAssume(V, KnownZero, KnownOne, Depth, Q);
1591 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1594 /// Determine whether the sign bit is known to be zero or one.
1595 /// Convenience wrapper around computeKnownBits.
1596 void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
1597 unsigned Depth, const Query &Q) {
1598 unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
1604 APInt ZeroBits(BitWidth, 0);
1605 APInt OneBits(BitWidth, 0);
1606 computeKnownBits(V, ZeroBits, OneBits, Depth, Q);
1607 KnownOne = OneBits[BitWidth - 1];
1608 KnownZero = ZeroBits[BitWidth - 1];
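// Editorial example (not part of the original source): for
//   %x = lshr i32 %a, 1
// the shifted-in top bit is always zero, so KnownZero comes back true here;
// when nothing is known about the operand, both flags stay false.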
1611 /// Return true if the given value is known to have exactly one
1612 /// bit set when defined. For vectors return true if every element is known to
1613 /// be a power of two when defined. Supports values with integer or pointer
1614 /// types and vectors of integers.
1615 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1617 if (const Constant *C = dyn_cast<Constant>(V)) {
1618 if (C->isNullValue())
1619 return OrZero;
1621 const APInt *ConstIntOrConstSplatInt;
1622 if (match(C, m_APInt(ConstIntOrConstSplatInt)))
1623 return ConstIntOrConstSplatInt->isPowerOf2();
1626 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1627 // it is shifted off the end then the result is undefined.
1628 if (match(V, m_Shl(m_One(), m_Value())))
1629 return true;
1631 // (signbit) >>l X is clearly a power of two if the one is not shifted off the
1632 // bottom. If it is shifted off the bottom then the result is undefined.
1633 if (match(V, m_LShr(m_SignBit(), m_Value())))
1634 return true;
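// Editorial illustration of the two patterns above (hypothetical IR):
//   %p = shl i32 1, %n             ; 1 << n
//   %q = lshr i32 -2147483648, %n  ; signbit >>l n
// Each has exactly one bit set whenever that bit is not shifted out.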
1636 // The remaining tests are all recursive, so bail out if we hit the limit.
1637 if (Depth++ == MaxDepth)
1638 return false;
1640 Value *X = nullptr, *Y = nullptr;
1641 // A shift left or a logical shift right of a power of two is a power of two or zero.
1643 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1644 match(V, m_LShr(m_Value(X), m_Value()))))
1645 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1647 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1648 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1650 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1651 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1652 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1654 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1655 // A power of two and'd with anything is a power of two or zero.
1656 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1657 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1658 return true;
1659 // X & (-X) is always a power of two or zero.
1660 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1661 return true;
1665 // Adding a power-of-two or zero to the same power-of-two or zero yields
1666 // either the original power-of-two, a larger power-of-two or zero.
1667 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1668 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1669 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1670 if (match(X, m_And(m_Specific(Y), m_Value())) ||
1671 match(X, m_And(m_Value(), m_Specific(Y))))
1672 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1673 return true;
1674 if (match(Y, m_And(m_Specific(X), m_Value())) ||
1675 match(Y, m_And(m_Value(), m_Specific(X))))
1676 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1677 return true;
1679 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1680 APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
1681 computeKnownBits(X, LHSZeroBits, LHSOneBits, Depth, Q);
1683 APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
1684 computeKnownBits(Y, RHSZeroBits, RHSOneBits, Depth, Q);
1685 // If i8 V is a power of two or zero:
1686 // ZeroBits: 1 1 1 0 1 1 1 1
1687 // ~ZeroBits: 0 0 0 1 0 0 0 0
1688 if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
1689 // If OrZero isn't set, we cannot give back a zero result.
1690 // Make sure either the LHS or RHS has a bit set.
1691 if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
1692 return true;
1696 // An exact divide or right shift can only shift off zero bits, so the result
1697 // is a power of two only if the first operand is a power of two and not
1698 // copying a sign bit (sdiv int_min, 2).
1699 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1700 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1701 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1702 Depth, Q);
1708 /// \brief Test whether a GEP's result is known to be non-null.
1710 /// Uses properties inherent in a GEP to try to determine whether it is known to be non-null.
1713 /// Currently this routine does not support vector GEPs.
1714 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1716 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1719 // FIXME: Support vector-GEPs.
1720 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1722 // If the base pointer is non-null, we cannot walk to a null address with an
1723 // inbounds GEP in address space zero.
1724 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1727 // Walk the GEP operands and see if any operand introduces a non-zero offset.
1728 // If so, then the GEP cannot produce a null pointer, as doing so would
1729 // inherently violate the inbounds contract within address space zero.
1730 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1731 GTI != GTE; ++GTI) {
1732 // Struct types are easy -- they must always be indexed by a constant.
1733 if (StructType *STy = GTI.getStructTypeOrNull()) {
1734 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1735 unsigned ElementIdx = OpC->getZExtValue();
1736 const StructLayout *SL = Q.DL.getStructLayout(STy);
1737 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1738 if (ElementOffset > 0)
1739 return true;
1743 // If we have a zero-sized type, the index doesn't matter. Keep looping.
1744 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1747 // Fast path the constant operand case both for efficiency and so we don't
1748 // increment Depth when just zipping down an all-constant GEP.
1749 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1750 if (!OpC->isZero())
1751 return true;
1752 continue;
1753 }
1755 // We post-increment Depth here because while isKnownNonZero increments it
1756 // as well, when we pop back up that increment won't persist. We don't want
1757 // to recurse 10k times just because we have 10k GEP operands. We don't
1758 // bail completely out because we want to handle constant GEPs regardless of depth.
1760 if (Depth++ >= MaxDepth)
1761 continue;
1763 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1764 return true;
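// Editorial example (not part of the original source): in
//   %p = getelementptr inbounds i32, i32* %base, i64 1
// the constant index contributes a non-zero offset, so the walk above proves
// %p non-null in address space zero even with nothing known about %base.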
1770 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1771 /// ensure that the value it's attached to is never equal to 'Value'?
1773 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1774 const unsigned NumRanges = Ranges->getNumOperands() / 2;
1775 assert(NumRanges >= 1);
1776 for (unsigned i = 0; i < NumRanges; ++i) {
1777 ConstantInt *Lower =
1778 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1779 ConstantInt *Upper =
1780 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1781 ConstantRange Range(Lower->getValue(), Upper->getValue());
1782 if (Range.contains(Value))
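// Editorial example (not part of the original source): a load annotated with
//   !range !{i32 1, i32 256}
// has the single range [1, 256), which does not contain 0, so a query with
// Value == 0 returns true here and isKnownNonZero can rely on it.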
1788 /// Return true if the given value is known to be non-zero when defined.
1789 /// For vectors return true if every element is known to be non-zero when
1790 defined. Supports values with integer or pointer type and vectors of integers.
1792 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1793 if (auto *C = dyn_cast<Constant>(V)) {
1794 if (C->isNullValue())
1795 return false;
1796 if (isa<ConstantInt>(C))
1797 // Must be non-zero due to null test above.
1798 return true;
1800 // For constant vectors, check that all elements are undefined or known
1801 // non-zero to determine that the whole vector is known non-zero.
1802 if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1803 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1804 Constant *Elt = C->getAggregateElement(i);
1805 if (!Elt || Elt->isNullValue())
1806 return false;
1807 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1808 return false;
1816 if (auto *I = dyn_cast<Instruction>(V)) {
1817 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1818 // If the possible ranges don't contain zero, then the value is
1819 // definitely non-zero.
1820 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1821 const APInt ZeroValue(Ty->getBitWidth(), 0);
1822 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1823 return true;
1828 // The remaining tests are all recursive, so bail out if we hit the limit.
1829 if (Depth++ >= MaxDepth)
1830 return false;
1832 // Check for pointer simplifications.
1833 if (V->getType()->isPointerTy()) {
1834 if (isKnownNonNull(V))
1835 return true;
1836 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1837 if (isGEPKnownNonNull(GEP, Depth, Q))
1838 return true;
1841 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1843 // X | Y != 0 if X != 0 or Y != 0.
1844 Value *X = nullptr, *Y = nullptr;
1845 if (match(V, m_Or(m_Value(X), m_Value(Y))))
1846 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1848 // ext X != 0 if X != 0.
1849 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1850 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1852 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
1853 // if the lowest bit is shifted off the end.
1854 if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1855 // shl nuw can't remove any non-zero bits.
1856 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1857 if (BO->hasNoUnsignedWrap())
1858 return isKnownNonZero(X, Depth, Q);
1860 APInt KnownZero(BitWidth, 0);
1861 APInt KnownOne(BitWidth, 0);
1862 computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1863 if (KnownOne[0])
1864 return true;
1866 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
1867 // defined if the sign bit is shifted off the end.
1868 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1869 // shr exact can only shift out zero bits.
1870 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1871 if (BO->isExact())
1872 return isKnownNonZero(X, Depth, Q);
1874 bool XKnownNonNegative, XKnownNegative;
1875 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1879 // If the shifter operand is a constant, and all of the bits shifted
1880 // out are known to be zero, and X is known non-zero then at least one
1881 // non-zero bit must remain.
1882 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1883 APInt KnownZero(BitWidth, 0);
1884 APInt KnownOne(BitWidth, 0);
1885 computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1887 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1888 // Is there a known one in the portion not shifted out?
1889 if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal)
1890 return true;
1891 // Are all the bits to be shifted out known zero?
1892 if (KnownZero.countTrailingOnes() >= ShiftVal)
1893 return isKnownNonZero(X, Depth, Q);
1896 // div exact can only produce a zero if the dividend is zero.
1897 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1898 return isKnownNonZero(X, Depth, Q);
1901 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1902 bool XKnownNonNegative, XKnownNegative;
1903 bool YKnownNonNegative, YKnownNegative;
1904 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1905 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Depth, Q);
1907 // If X and Y are both non-negative (as signed values) then their sum is not
1908 // zero unless both X and Y are zero.
1909 if (XKnownNonNegative && YKnownNonNegative)
1910 if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
1911 return true;
1913 // If X and Y are both negative (as signed values) then their sum is not
1914 // zero unless both X and Y equal INT_MIN.
1915 if (BitWidth && XKnownNegative && YKnownNegative) {
1916 APInt KnownZero(BitWidth, 0);
1917 APInt KnownOne(BitWidth, 0);
1918 APInt Mask = APInt::getSignedMaxValue(BitWidth);
1919 // The sign bit of X is set. If some other bit is set then X is not equal
1920 // to INT_MIN.
1921 computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1922 if ((KnownOne & Mask) != 0)
1923 return true;
1924 // The sign bit of Y is set. If some other bit is set then Y is not equal
1925 // to INT_MIN.
1926 computeKnownBits(Y, KnownZero, KnownOne, Depth, Q);
1927 if ((KnownOne & Mask) != 0)
1928 return true;
1931 // The sum of a non-negative number and a power of two is not zero.
1932 if (XKnownNonNegative &&
1933 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
1934 return true;
1935 if (YKnownNonNegative &&
1936 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
1937 return true;
1940 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
1941 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1942 // If X and Y are non-zero then so is X * Y as long as the multiplication
1943 // does not overflow.
1944 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
1945 isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
1946 return true;
1948 // (C ? X : Y) != 0 if X != 0 and Y != 0.
1949 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
1950 if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
1951 isKnownNonZero(SI->getFalseValue(), Depth, Q))
1952 return true;
1955 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
1956 // Try to detect a recurrence that monotonically increases from a
1957 // starting value, as these are common as induction variables.
1958 if (PN->getNumIncomingValues() == 2) {
1959 Value *Start = PN->getIncomingValue(0);
1960 Value *Induction = PN->getIncomingValue(1);
1961 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
1962 std::swap(Start, Induction);
1963 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
1964 if (!C->isZero() && !C->isNegative()) {
1965 ConstantInt *X;
1966 if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
1967 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
1968 !X->isNegative())
1969 return true;
1973 // Check if all incoming values are non-zero constant.
1974 bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) {
1975 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZeroValue();
1977 if (AllNonZeroConstants)
1978 return true;
1981 if (!BitWidth) return false;
1982 APInt KnownZero(BitWidth, 0);
1983 APInt KnownOne(BitWidth, 0);
1984 computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
1985 return KnownOne != 0;
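// Editorial example (not part of the original source): for
//   %v = or i32 %a, 1
// the m_Or case above answers the query directly, because the constant
// operand 1 is trivially non-zero; the final known-bits test is only a
// fallback for values no pattern matches.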
1988 /// Return true if V1 == V2 + X, where X is known non-zero.
1989 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
1990 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
1991 if (!BO || BO->getOpcode() != Instruction::Add)
1993 Value *Op = nullptr;
1994 if (V2 == BO->getOperand(0))
1995 Op = BO->getOperand(1);
1996 else if (V2 == BO->getOperand(1))
1997 Op = BO->getOperand(0);
1998 else
1999 return false;
2000 return isKnownNonZero(Op, 0, Q);
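// Editorial example (not part of the original source): with
//   %v1 = add i32 %v2, 8
// the operand that is not %v2 is the constant 8, which isKnownNonZero
// accepts, so %v1 can never equal %v2 (8 != 0 modulo 2^32).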
2003 /// Return true if it is known that V1 != V2.
2004 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2005 if (V1->getType()->isVectorTy() || V1 == V2)
2006 return false;
2007 if (V1->getType() != V2->getType())
2008 // We can't look through casts yet.
2009 return false;
2010 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2011 return true;
2013 if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) {
2014 // Are any known bits in V1 contradictory to known bits in V2? If V1
2015 // has a known zero where V2 has a known one, they must not be equal.
2016 auto BitWidth = Ty->getBitWidth();
2017 APInt KnownZero1(BitWidth, 0);
2018 APInt KnownOne1(BitWidth, 0);
2019 computeKnownBits(V1, KnownZero1, KnownOne1, 0, Q);
2020 APInt KnownZero2(BitWidth, 0);
2021 APInt KnownOne2(BitWidth, 0);
2022 computeKnownBits(V2, KnownZero2, KnownOne2, 0, Q);
2024 auto OppositeBits = (KnownZero1 & KnownOne2) | (KnownZero2 & KnownOne1);
2025 if (OppositeBits.getBoolValue())
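// Editorial example (not part of the original source):
//   %a = or i32 %x, 1   ; bit 0 known one
//   %b = shl i32 %y, 1  ; bit 0 known zero
// The opposite low bits prove %a and %b can never be equal.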
2031 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2032 /// simplify operations downstream. Mask is known to be zero for bits that V cannot have.
2035 /// This function is defined on values with integer type, values with pointer
2036 /// type, and vectors of integers. In the case
2037 /// where V is a vector, the mask, known zero, and known one values are the
2038 /// same width as the vector element, and the bit is set only if it is true
2039 /// for all of the elements in the vector.
2040 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2042 APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
2043 computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
2044 return (KnownZero & Mask) == Mask;
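// Editorial example (not part of the original source): for
//   %v = and i32 %a, 255
// and Mask == 0xFFFFFF00, the known zero bits cover the entire mask, so this
// returns true.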
2047 /// For vector constants, loop over the elements and find the constant with the
2048 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2049 /// or if any element was not analyzed; otherwise, return the count for the
2050 /// element with the minimum number of sign bits.
2051 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2053 const auto *CV = dyn_cast<Constant>(V);
2054 if (!CV || !CV->getType()->isVectorTy())
2057 unsigned MinSignBits = TyBits;
2058 unsigned NumElts = CV->getType()->getVectorNumElements();
2059 for (unsigned i = 0; i != NumElts; ++i) {
2060 // If we find a non-ConstantInt, bail out.
2061 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2065 // If the sign bit is 1, flip the bits, so we always count leading zeros.
2066 APInt EltVal = Elt->getValue();
2067 if (EltVal.isNegative())
2068 EltVal = ~EltVal;
2069 MinSignBits = std::min(MinSignBits, EltVal.countLeadingZeros());
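// Editorial example (not part of the original source): for the constant
// <2 x i8> <i8 -1, i8 3> the elements carry 8 and 6 sign bits respectively,
// so the minimum computed here is 6.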
2075 /// Return the number of times the sign bit of the register is replicated into
2076 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2077 /// (itself), but other cases can give us information. For example, immediately
2078 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2079 /// other, so we return 3. For vectors, return the number of sign bits for the
2080 // vector element with the minimum number of known sign bits.
2081 unsigned ComputeNumSignBits(const Value *V, unsigned Depth, const Query &Q) {
2082 unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
2083 unsigned Tmp, Tmp2;
2084 unsigned FirstAnswer = 1;
2086 // Note that ConstantInt is handled by the general computeKnownBits case below.
2089 if (Depth == MaxDepth)
2090 return 1; // Limit search depth.
2092 const Operator *U = dyn_cast<Operator>(V);
2093 switch (Operator::getOpcode(V)) {
2095 case Instruction::SExt:
2096 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2097 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2099 case Instruction::SDiv: {
2100 const APInt *Denominator;
2101 // sdiv X, C -> adds log(C) sign bits.
2102 if (match(U->getOperand(1), m_APInt(Denominator))) {
2104 // Ignore non-positive denominator.
2105 if (!Denominator->isStrictlyPositive())
2108 // Calculate the incoming numerator bits.
2109 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2111 // Add floor(log(C)) bits to the numerator bits.
2112 return std::min(TyBits, NumBits + Denominator->logBase2());
2117 case Instruction::SRem: {
2118 const APInt *Denominator;
2119 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2120 // positive constant. This lets us put a lower bound on the number of sign bits.
2122 if (match(U->getOperand(1), m_APInt(Denominator))) {
2124 // Ignore non-positive denominator.
2125 if (!Denominator->isStrictlyPositive())
2128 // Calculate the incoming numerator bits. SRem by a positive constant
2129 // can't lower the number of sign bits.
2130 unsigned NumrBits =
2131 ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2133 // Calculate the leading sign bit constraints by examining the
2134 // denominator. Given that the denominator is positive, there are two
2135 // cases:
2137 // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2138 // (1 << ceilLogBase2(C)).
2140 // 2. the numerator is negative. Then the result range is (-C,0] and
2141 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2143 // Thus a lower bound on the number of sign bits is `TyBits -
2144 // ceilLogBase2(C)`.
2146 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2147 return std::max(NumrBits, ResBits);
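// Editorial worked example (not part of the original source): for
//   %r = srem i8 %x, 5
// the result lies in (-5, 5) and ceilLogBase2(5) == 3, so at least
// 8 - 3 == 5 sign bits are guaranteed (more if the numerator has more).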
2152 case Instruction::AShr: {
2153 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2154 // ashr X, C -> adds C sign bits. Vectors too.
2155 const APInt *ShAmt;
2156 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2157 Tmp += ShAmt->getZExtValue();
2158 if (Tmp > TyBits) Tmp = TyBits;
2162 case Instruction::Shl: {
2163 const APInt *ShAmt;
2164 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2165 // shl destroys sign bits.
2166 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2167 Tmp2 = ShAmt->getZExtValue();
2168 if (Tmp2 >= TyBits || // Bad shift.
2169 Tmp2 >= Tmp) break; // Shifted all sign bits out.
2174 case Instruction::And:
2175 case Instruction::Or:
2176 case Instruction::Xor: // NOT is handled here.
2177 // At worst, logical binary ops preserve the number of sign bits of their operands.
2178 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2180 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2181 FirstAnswer = std::min(Tmp, Tmp2);
2182 // We computed what we know about the sign bits as our first
2183 // answer. Now proceed to the generic code that uses
2184 // computeKnownBits, and pick whichever answer is better.
2188 case Instruction::Select:
2189 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2190 if (Tmp == 1) return 1; // Early out.
2191 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2192 return std::min(Tmp, Tmp2);
2194 case Instruction::Add:
2195 // Add can have at most one carry bit. Thus we know that the output
2196 // is, at worst, one more bit than the inputs.
2197 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2198 if (Tmp == 1) return 1; // Early out.
2200 // Special case decrementing a value (ADD X, -1):
2201 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2202 if (CRHS->isAllOnesValue()) {
2203 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2204 computeKnownBits(U->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
2206 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits.
2208 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2211 // If we are subtracting one from a positive number, there is no carry
2212 // out of the result.
2213 if (KnownZero.isNegative())
2217 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2218 if (Tmp2 == 1) return 1;
2219 return std::min(Tmp, Tmp2)-1;
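// Editorial example (not part of the original source): if both operands of
// an i32 add are known to have at least 10 sign bits, the sum is guaranteed
// min(10, 10) - 1 == 9 sign bits.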
2221 case Instruction::Sub:
2222 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2223 if (Tmp2 == 1) return 1;
2226 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2227 if (CLHS->isNullValue()) {
2228 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2229 computeKnownBits(U->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
2230 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits.
2232 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2235 // If the input is known to be positive (the sign bit is known clear),
2236 // the output of the NEG has the same number of sign bits as the input.
2237 if (KnownZero.isNegative())
2240 // Otherwise, we treat this like a SUB.
2243 // Sub can have at most one carry bit. Thus we know that the output
2244 // is, at worst, one more bit than the inputs.
2245 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2246 if (Tmp == 1) return 1; // Early out.
2247 return std::min(Tmp, Tmp2)-1;
2249 case Instruction::PHI: {
2250 const PHINode *PN = cast<PHINode>(U);
2251 unsigned NumIncomingValues = PN->getNumIncomingValues();
2252 // Don't analyze large in-degree PHIs.
2253 if (NumIncomingValues > 4) break;
2254 // Unreachable blocks may have zero-operand PHI nodes.
2255 if (NumIncomingValues == 0) break;
2257 // Take the minimum of all incoming values. This can't infinitely loop
2258 // because of our depth threshold.
2259 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2260 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2261 if (Tmp == 1) return Tmp;
2262 Tmp = std::min(
2263 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2268 case Instruction::Trunc:
2269 // FIXME: it's tricky to do anything useful for this, but it is an important
2270 // case for targets like X86.
2273 case Instruction::ExtractElement:
2274 // Look through extract element. At the moment we keep this simple and skip
2275 // tracking the specific element. But at least we might find information
2276 // valid for all elements of the vector (for example if the vector is sign
2277 // extended, shifted, etc).
2278 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2281 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2282 // use this information.
2284 // If we can examine all elements of a vector constant successfully, we're
2285 // done (we can't do any better than that). If not, keep trying.
2286 if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2287 return VecSignBits;
2289 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2290 computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
2292 // If we know that the sign bit is either zero or one, determine the number of
2293 // identical bits in the top of the input value.
2294 if (KnownZero.isNegative())
2295 return std::max(FirstAnswer, KnownZero.countLeadingOnes());
2297 if (KnownOne.isNegative())
2298 return std::max(FirstAnswer, KnownOne.countLeadingOnes());
2300 // computeKnownBits gave us no extra information about the top bits.
2304 /// This function computes the integer multiple of Base that equals V.
2305 /// If successful, it returns true and stores the multiple in
2306 /// Multiple. If unsuccessful, it returns false. It looks
2307 /// through SExt instructions only if LookThroughSExt is true.
2308 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2309 bool LookThroughSExt, unsigned Depth) {
2310 const unsigned MaxDepth = 6;
2312 assert(V && "No Value?");
2313 assert(Depth <= MaxDepth && "Limit Search Depth");
2314 assert(V->getType()->isIntegerTy() && "Not integer type!");
2316 Type *T = V->getType();
2318 ConstantInt *CI = dyn_cast<ConstantInt>(V);
2328 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2329 Constant *BaseVal = ConstantInt::get(T, Base);
2330 if (CO && CO == BaseVal) {
2332 Multiple = ConstantInt::get(T, 1);
2336 if (CI && CI->getZExtValue() % Base == 0) {
2337 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2341 if (Depth == MaxDepth) return false; // Limit search depth.
2343 Operator *I = dyn_cast<Operator>(V);
2344 if (!I) return false;
2346 switch (I->getOpcode()) {
2348 case Instruction::SExt:
2349 if (!LookThroughSExt) return false;
2350 // otherwise fall through to ZExt
2351 case Instruction::ZExt:
2352 return ComputeMultiple(I->getOperand(0), Base, Multiple,
2353 LookThroughSExt, Depth+1);
2354 case Instruction::Shl:
2355 case Instruction::Mul: {
2356 Value *Op0 = I->getOperand(0);
2357 Value *Op1 = I->getOperand(1);
2359 if (I->getOpcode() == Instruction::Shl) {
2360 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2361 if (!Op1CI) return false;
2362 // Turn Op0 << Op1 into Op0 * 2^Op1
2363 APInt Op1Int = Op1CI->getValue();
2364 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2365 APInt API(Op1Int.getBitWidth(), 0);
2366 API.setBit(BitToSet);
2367 Op1 = ConstantInt::get(V->getContext(), API);
2370 Value *Mul0 = nullptr;
2371 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2372 if (Constant *Op1C = dyn_cast<Constant>(Op1))
2373 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2374 if (Op1C->getType()->getPrimitiveSizeInBits() <
2375 MulC->getType()->getPrimitiveSizeInBits())
2376 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2377 if (Op1C->getType()->getPrimitiveSizeInBits() >
2378 MulC->getType()->getPrimitiveSizeInBits())
2379 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2381 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2382 Multiple = ConstantExpr::getMul(MulC, Op1C);
2386 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2387 if (Mul0CI->getValue() == 1) {
2388 // V == Base * Op1, so return Op1
2394 Value *Mul1 = nullptr;
2395 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2396 if (Constant *Op0C = dyn_cast<Constant>(Op0))
2397 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2398 if (Op0C->getType()->getPrimitiveSizeInBits() <
2399 MulC->getType()->getPrimitiveSizeInBits())
2400 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2401 if (Op0C->getType()->getPrimitiveSizeInBits() >
2402 MulC->getType()->getPrimitiveSizeInBits())
2403 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2405 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2406 Multiple = ConstantExpr::getMul(MulC, Op0C);
2410 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2411 if (Mul1CI->getValue() == 1) {
2412 // V == Base * Op0, so return Op0
2420 // We could not determine if V is a multiple of Base.
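// Editorial usage sketch (hypothetical caller, not from the original file):
//   Value *Multiple = nullptr;
//   if (ComputeMultiple(V, /*Base=*/8, Multiple, /*LookThroughSExt=*/false,
//                       /*Depth=*/0))
//     ; // here V == 8 * Multiple holds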
2424 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2425 const TargetLibraryInfo *TLI) {
2426 const Function *F = ICS.getCalledFunction();
2427 if (!F)
2428 return Intrinsic::not_intrinsic;
2430 if (F->isIntrinsic())
2431 return F->getIntrinsicID();
2433 if (!TLI)
2434 return Intrinsic::not_intrinsic;
2436 LibFunc::Func Func;
2437 // We're going to make assumptions about the semantics of the functions; check
2438 // that the target knows the function is available in this environment and
2439 // that it does not have local linkage.
2440 if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2441 return Intrinsic::not_intrinsic;
2443 if (!ICS.onlyReadsMemory())
2444 return Intrinsic::not_intrinsic;
2446 // Otherwise check if we have a call to a function that can be turned into a
2447 // vector intrinsic.
2448 switch (Func) {
2449 default:
2450 break;
2451 case LibFunc::sin:
2452 case LibFunc::sinf:
2453 case LibFunc::sinl:
2454 return Intrinsic::sin;
2455 case LibFunc::cos:
2456 case LibFunc::cosf:
2457 case LibFunc::cosl:
2458 return Intrinsic::cos;
2459 case LibFunc::exp:
2460 case LibFunc::expf:
2461 case LibFunc::expl:
2462 return Intrinsic::exp;
2463 case LibFunc::exp2:
2464 case LibFunc::exp2f:
2465 case LibFunc::exp2l:
2466 return Intrinsic::exp2;
2467 case LibFunc::log:
2468 case LibFunc::logf:
2469 case LibFunc::logl:
2470 return Intrinsic::log;
2471 case LibFunc::log10:
2472 case LibFunc::log10f:
2473 case LibFunc::log10l:
2474 return Intrinsic::log10;
2475 case LibFunc::log2:
2476 case LibFunc::log2f:
2477 case LibFunc::log2l:
2478 return Intrinsic::log2;
2479 case LibFunc::fabs:
2480 case LibFunc::fabsf:
2481 case LibFunc::fabsl:
2482 return Intrinsic::fabs;
2483 case LibFunc::fmin:
2484 case LibFunc::fminf:
2485 case LibFunc::fminl:
2486 return Intrinsic::minnum;
2487 case LibFunc::fmax:
2488 case LibFunc::fmaxf:
2489 case LibFunc::fmaxl:
2490 return Intrinsic::maxnum;
2491 case LibFunc::copysign:
2492 case LibFunc::copysignf:
2493 case LibFunc::copysignl:
2494 return Intrinsic::copysign;
2495 case LibFunc::floor:
2496 case LibFunc::floorf:
2497 case LibFunc::floorl:
2498 return Intrinsic::floor;
2499 case LibFunc::ceil:
2500 case LibFunc::ceilf:
2501 case LibFunc::ceill:
2502 return Intrinsic::ceil;
2503 case LibFunc::trunc:
2504 case LibFunc::truncf:
2505 case LibFunc::truncl:
2506 return Intrinsic::trunc;
2507 case LibFunc::rint:
2508 case LibFunc::rintf:
2509 case LibFunc::rintl:
2510 return Intrinsic::rint;
2511 case LibFunc::nearbyint:
2512 case LibFunc::nearbyintf:
2513 case LibFunc::nearbyintl:
2514 return Intrinsic::nearbyint;
2515 case LibFunc::round:
2516 case LibFunc::roundf:
2517 case LibFunc::roundl:
2518 return Intrinsic::round;
2519 case LibFunc::pow:
2520 case LibFunc::powf:
2521 case LibFunc::powl:
2522 return Intrinsic::pow;
2523 case LibFunc::sqrt:
2524 case LibFunc::sqrtf:
2525 case LibFunc::sqrtl:
2526 if (ICS->hasNoNaNs())
2527 return Intrinsic::sqrt;
2528 return Intrinsic::not_intrinsic;
2529 }
2531 return Intrinsic::not_intrinsic;
2534 /// Return true if we can prove that the specified FP value is never equal to
2535 /// -0.0.
2537 /// NOTE: this function will need to be revisited when we support non-default
2538 /// rounding modes!
2540 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2542 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2543 return !CFP->getValueAPF().isNegZero();
2545 // FIXME: Magic number! At the least, this should be given a name because it's
2546 // used similarly in CannotBeOrderedLessThanZero(). A better fix may be to
2547 // expose it as a parameter, so it can be used for testing / experimenting.
2548 if (Depth == MaxDepth)
2549 return false; // Limit search depth.
2551 const Operator *I = dyn_cast<Operator>(V);
2552 if (!I) return false;
2554 // Check if the nsz fast-math flag is set
2555 if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
2556 if (FPO->hasNoSignedZeros())
2559 // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
2560 if (I->getOpcode() == Instruction::FAdd)
2561 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
2562 if (CFP->isNullValue())
2565 // sitofp and uitofp turn into +0.0 for zero.
2566 if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
2569 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
2570 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2571 switch (IID) {
2572 default:
2573 break;
2574 // sqrt(-0.0) = -0.0, no other negative results are possible.
2575 case Intrinsic::sqrt:
2576 return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1);
2578 case Intrinsic::fabs:
2586 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2587 const TargetLibraryInfo *TLI,
2589 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2590 return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();
2592 // FIXME: Magic number! At the least, this should be given a name because it's
2593 // used similarly in CannotBeNegativeZero(). A better fix may be to
2594 // expose it as a parameter, so it can be used for testing / experimenting.
2595 if (Depth == MaxDepth)
2596 return false; // Limit search depth.
2598 const Operator *I = dyn_cast<Operator>(V);
2599 if (!I) return false;
2601 switch (I->getOpcode()) {
2603 // Unsigned integers are always nonnegative.
2604 case Instruction::UIToFP:
2606 case Instruction::FMul:
2607 // x*x is always non-negative or a NaN.
2608 if (I->getOperand(0) == I->getOperand(1))
2611 case Instruction::FAdd:
2612 case Instruction::FDiv:
2613 case Instruction::FRem:
2614 return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
2615 CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2616 case Instruction::Select:
2617 return CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1) &&
2618 CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
2619 case Instruction::FPExt:
2620 case Instruction::FPTrunc:
2621 // Widening/narrowing never change sign.
2622 return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
2623 case Instruction::Call:
2624 Intrinsic::ID IID = getIntrinsicForCallSite(cast<CallInst>(I), TLI);
2625 switch (IID) {
2626 default:
2627 break;
2628 case Intrinsic::maxnum:
2629 return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) ||
2630 CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2631 case Intrinsic::minnum:
2632 return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
2633 CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2634 case Intrinsic::exp:
2635 case Intrinsic::exp2:
2636 case Intrinsic::fabs:
2637 case Intrinsic::sqrt:
2639 case Intrinsic::powi:
2640 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
2641 // powi(x,n) is non-negative if n is even.
2642 if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
2645 return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
2646 case Intrinsic::fma:
2647 case Intrinsic::fmuladd:
2648 // x*x+y is non-negative if y is non-negative.
2649 return I->getOperand(0) == I->getOperand(1) &&
2650 CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
2657 /// If the specified value can be set by repeating the same byte in memory,
2658 /// return the i8 value that it is represented with. This is
2659 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2660 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
2661 /// byte store (e.g. i16 0x1234), return null.
2662 Value *llvm::isBytewiseValue(Value *V) {
2663 // All byte-wide stores are splatable, even of arbitrary variables.
2664 if (V->getType()->isIntegerTy(8)) return V;
2666 // Handle 'null' ConstantAggregateZero etc.
2667 if (Constant *C = dyn_cast<Constant>(V))
2668 if (C->isNullValue())
2669 return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2671 // Constant float and double values can be handled as integer values if the
2672 // corresponding integer value is "byteable". An important case is 0.0.
2673 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2674 if (CFP->getType()->isFloatTy())
2675 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2676 if (CFP->getType()->isDoubleTy())
2677 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2678 // Don't handle long double formats, which have strange constraints.
2681 // We can handle constant integers whose width is a multiple of 8 bits.
2682 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2683 if (CI->getBitWidth() % 8 == 0) {
2684 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2686 if (!CI->getValue().isSplat(8))
2687 return nullptr;
2688 return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
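// Editorial example (not part of the original source): i32 0xAAAAAAAA is a
// splat of the byte 0xAA and is returned truncated to i8; i16 0x1234 is not
// a splat, so it yields null.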
2692 // A ConstantDataArray/Vector is splatable if all its members are equal and also splatable.
2694 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2695 Value *Elt = CA->getElementAsConstant(0);
2696 Value *Val = isBytewiseValue(Elt);
2697 if (!Val)
2698 return nullptr;
2700 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2701 if (CA->getElementAsConstant(I) != Elt)
2707 // Conceptually, we could handle things like:
2708 // %a = zext i8 %X to i16
2709 // %b = shl i16 %a, 8
2710 // %c = or i16 %a, %b
2711 // but until there is an example that actually needs this, it doesn't seem
2712 // worth worrying about.
2717 // This is the recursive version of BuildSubAggregate. It takes a few different
2718 // arguments. Idxs is the index within the nested struct From that we are
2719 // looking at now (which is of type IndexedType). IdxSkip is the number of
2720 // indices from Idxs that should be left out when inserting into the resulting
2721 // struct. To is the result struct built so far, new insertvalue instructions are
2722 // inserted before InsertBefore.
2723 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2724 SmallVectorImpl<unsigned> &Idxs,
2726 Instruction *InsertBefore) {
2727 llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
2729 // Save the original To argument so we can modify it
2730 Value *OrigTo = To;
2731 // General case, the type indexed by Idxs is a struct
2732 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2733 // Process each struct element recursively
2734 Idxs.push_back(i);
2735 Value *PrevTo = To;
2736 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2740 // Couldn't find any inserted value for this index? Cleanup
2741 while (PrevTo != OrigTo) {
2742 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2743 PrevTo = Del->getAggregateOperand();
2744 Del->eraseFromParent();
2746 // Stop processing elements
2750 // If we successfully found a value for each of our subaggregates
2754 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
2755 // the struct's elements had a value that was inserted directly. In the latter
2756 // case, perhaps we can't determine each of the subelements individually, but
2757 // we might be able to find the complete struct somewhere.
2759 // Find the value that is at that particular spot
2760 Value *V = FindInsertedValue(From, Idxs);
2765 // Insert the value in the new (sub) aggregate
2766 return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2767 "tmp", InsertBefore);
2770 // This helper takes a nested struct and extracts a part of it (which is again a
2771 // struct) into a new value. For example, given the struct:
2772 // { a, { b, { c, d }, e } }
2773 // and the indices "1, 1" this returns
2776 // It does this by inserting an insertvalue for each element in the resulting
2777 // struct, as opposed to just inserting a single struct. This will only work if
2778 // each of the elements of the substruct are known (ie, inserted into From by an
2779 // insertvalue instruction somewhere).
2781 // All inserted insertvalue instructions are inserted before InsertBefore
2782 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2783 Instruction *InsertBefore) {
2784 assert(InsertBefore && "Must have someplace to insert!");
2785 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2787 Value *To = UndefValue::get(IndexedType);
2788 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2789 unsigned IdxSkip = Idxs.size();
2791 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2794 /// Given an aggregate and a sequence of indices, see if
2795 /// the scalar value indexed is already around as a register, for example if it
2796 /// were inserted directly into the aggregate.
2798 /// If InsertBefore is not null, this function will duplicate (modified)
2799 /// insertvalues when a part of a nested struct is extracted.
2800 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2801 Instruction *InsertBefore) {
2802 // Nothing to index? Just return V then (this is useful at the end of our recursion).
2804 if (idx_range.empty())
2806 // We have indices, so V should have an indexable type.
2807 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
2808 "Not looking at a struct or array?");
2809 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
2810 "Invalid indices for type?");
2812 if (Constant *C = dyn_cast<Constant>(V)) {
2813 C = C->getAggregateElement(idx_range[0]);
2814 if (!C) return nullptr;
2815 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
2818 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
2819 // Loop the indices for the insertvalue instruction in parallel with the
2820 // requested indices
2821 const unsigned *req_idx = idx_range.begin();
2822 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
2823 i != e; ++i, ++req_idx) {
2824 if (req_idx == idx_range.end()) {
2825 // We can't handle this without inserting insertvalues
2829 // The requested index identifies a part of a nested aggregate. Handle
2830 // this specially. For example,
2831 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
2832 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
2833 // %C = extractvalue {i32, { i32, i32 } } %B, 1
2834 // This can be changed into
2835 // %A = insertvalue {i32, i32 } undef, i32 10, 0
2836 // %C = insertvalue {i32, i32 } %A, i32 11, 1
2837 // which allows the unused 0,0 element from the nested struct to be
2839 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
2843 // This insert value inserts something other than what we are looking for.
2844 // See if the (aggregate) value inserted into has the value we are
2845 // looking for, then.
2847 return FindInsertedValue(I->getAggregateOperand(), idx_range,
2850 // If we end up here, the indices of the insertvalue match with those
2851 // requested (though possibly only partially). Now we recursively look at
2852 // the inserted value, passing any remaining indices.
2853 return FindInsertedValue(I->getInsertedValueOperand(),
2854 makeArrayRef(req_idx, idx_range.end()),
2858 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
2859 // If we're extracting a value from an aggregate that was extracted from
2860 // something else, we can extract from that something else directly instead.
2861 // However, we will need to chain I's indices with the requested indices.
2863 // Calculate the number of indices required
2864 unsigned size = I->getNumIndices() + idx_range.size();
2865 // Allocate some space to put the new indices in
2866 SmallVector<unsigned, 5> Idxs;
2868 // Add indices from the extract value instruction
2869 Idxs.append(I->idx_begin(), I->idx_end());
2871 // Add requested indices
2872 Idxs.append(idx_range.begin(), idx_range.end());
2874 assert(Idxs.size() == size
2875 && "Number of indices added not correct?");
2877 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
2879 // Otherwise, we don't know (such as, extracting from a function return value
2880 // or load instruction)
2884 /// Analyze the specified pointer to see if it can be expressed as a base
2885 /// pointer plus a constant offset. Return the base and offset to the caller.
2886 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
2887 const DataLayout &DL) {
2888 unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
2889 APInt ByteOffset(BitWidth, 0);
2891 // We walk up the defs but use a visited set to handle unreachable code. In
2892 // that case, we stop after accumulating the cycle once (not that it matters).
2894 SmallPtrSet<Value *, 16> Visited;
2895 while (Visited.insert(Ptr).second) {
2896 if (Ptr->getType()->isVectorTy())
2899 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
2900 // If one of the values we have visited is an addrspacecast, then
2901 // the pointer type of this GEP may be different from the type
2902 // of the Ptr parameter which was passed to this function. This
2903 // means when we construct GEPOffset, we need to use the size
2904 // of GEP's pointer type rather than the size of the original
2906 APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
2907 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
2910 ByteOffset += GEPOffset.getSExtValue();
2912 Ptr = GEP->getPointerOperand();
2913 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
2914 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
2915 Ptr = cast<Operator>(Ptr)->getOperand(0);
2916 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
2917 if (GA->isInterposable())
2919 Ptr = GA->getAliasee();
2924 Offset = ByteOffset.getSExtValue();
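// Editorial example (not part of the original source): for
//   %p = getelementptr i8, i8* %base, i64 12
// this returns %base and sets Offset to 12.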
2928 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP) {
2929 // Make sure the GEP has exactly three arguments.
2930 if (GEP->getNumOperands() != 3)
2933 // Make sure the index-ee is a pointer to array of i8.
2934 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
2935 if (!AT || !AT->getElementType()->isIntegerTy(8))
2938 // Check to make sure that the first operand of the GEP is an integer and
2939 // has value 0 so that we are sure we're indexing into the initializer.
2940 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
2941 if (!FirstIdx || !FirstIdx->isZero())
2947 /// This function computes the length of a null-terminated C string pointed to
2948 /// by V. If successful, it returns true and stores the string in Str.
2949 /// If unsuccessful, it returns false.
2950 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
2951 uint64_t Offset, bool TrimAtNul) {
2954 // Look through bitcast instructions and geps.
2955 V = V->stripPointerCasts();
2957 // If the value is a GEP instruction or constant expression, treat it as an
2959 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2960 // The GEP operator should be based on a pointer to string constant, and is
2961 // indexing into the string constant.
2962 if (!isGEPBasedOnPointerToString(GEP))
2965 // If the second index isn't a ConstantInt, then this is a variable index
2966 // into the array. If this occurs, we can't say anything meaningful about the string.
2968 uint64_t StartIdx = 0;
2969 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
2970 StartIdx = CI->getZExtValue();
2973 return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
2977 // The GEP instruction, constant or instruction, must reference a global
2978 // variable that is a constant and is initialized. The referenced constant
2979 // initializer is the array that we'll use for optimization.
2980 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2981 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
2984 // Handle the all-zeros case.
2985 if (GV->getInitializer()->isNullValue()) {
2986 // This is a degenerate case. The initializer is constant zero so the
2987 // length of the string must be zero.
2992 // This must be a ConstantDataArray.
2993 const auto *Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
2994 if (!Array || !Array->isString())
2997 // Get the number of elements in the array.
2998 uint64_t NumElts = Array->getType()->getArrayNumElements();
3000 // Start out with the entire array in the StringRef.
3001 Str = Array->getAsString();
3003 if (Offset > NumElts)
3006 // Skip over 'offset' bytes.
3007 Str = Str.substr(Offset);
3010 // Trim off the \0 and anything after it. If the array is not nul
3011 // terminated, we just return the remainder of the string. The client may know
3012 // some other way that the string is length-bound.
3013 Str = Str.substr(0, Str.find('\0'));
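// Editorial example (not part of the original source): for a global
//   @s = constant [6 x i8] c"hello\00"
// and a GEP selecting index 0, Str becomes "hello" once the NUL is trimmed.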
3018 // These next two are very similar to the above, but also look through PHI nodes.
3020 // TODO: See if we can integrate these two together.
3022 /// If we can compute the length of the string pointed to by
3023 /// the specified pointer, return 'len+1'. If we can't, return 0.
3024 static uint64_t GetStringLengthH(const Value *V,
3025 SmallPtrSetImpl<const PHINode*> &PHIs) {
3026 // Look through noop bitcast instructions.
3027 V = V->stripPointerCasts();
3029 // If this is a PHI node, there are two cases: either we have already seen it or we haven't.
3031 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3032 if (!PHIs.insert(PN).second)
3033 return ~0ULL; // already in the set.
3035 // If it was new, see if all the input strings are the same length.
3036 uint64_t LenSoFar = ~0ULL;
3037 for (Value *IncValue : PN->incoming_values()) {
3038 uint64_t Len = GetStringLengthH(IncValue, PHIs);
3039 if (Len == 0) return 0; // Unknown length -> unknown.
3041 if (Len == ~0ULL) continue;
3043 if (Len != LenSoFar && LenSoFar != ~0ULL)
3044 return 0; // Disagree -> unknown.
3048 // Success, all agree.
3052 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3053 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3054 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
3055 if (Len1 == 0) return 0;
3056 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
3057 if (Len2 == 0) return 0;
3058 if (Len1 == ~0ULL) return Len2;
3059 if (Len2 == ~0ULL) return Len1;
3060 if (Len1 != Len2) return 0;
3064 // Otherwise, see if we can read the string.
3066 if (!getConstantStringInfo(V, StrData))
3069 return StrData.size()+1;
3072 /// If we can compute the length of the string pointed to by
3073 /// the specified pointer, return 'len+1'. If we can't, return 0.
3074 uint64_t llvm::GetStringLength(const Value *V) {
3075 if (!V->getType()->isPointerTy()) return 0;
3077 SmallPtrSet<const PHINode*, 32> PHIs;
3078 uint64_t Len = GetStringLengthH(V, PHIs);
3079 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3080 // an empty string as a length.
3081 return Len == ~0ULL ? 1 : Len;
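// Editorial example (not part of the original source): for a pointer to the
// constant string "abc" this returns 4, the three characters plus the
// terminating NUL.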
3084 /// \brief \p PN defines a loop-variant pointer to an object. Check if the
3085 /// previous iteration of the loop was referring to the same object as \p PN.
3086 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3087 const LoopInfo *LI) {
3088 // Find the loop-defined value.
3089 Loop *L = LI->getLoopFor(PN->getParent());
3090 if (PN->getNumIncomingValues() != 2)
3093 // Find the value from previous iteration.
3094 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3095 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3096 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3097 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3100 // If a new pointer is loaded in the loop, the pointer references a different
3101 // object in every iteration. E.g.:
3105 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3106 if (!L->isLoopInvariant(Load->getPointerOperand()))
3111 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3112 unsigned MaxLookup) {
3113 if (!V->getType()->isPointerTy())
3115 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3116 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3117 V = GEP->getPointerOperand();
3118 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3119 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3120 V = cast<Operator>(V)->getOperand(0);
3121 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3122 if (GA->isInterposable())
3124 V = GA->getAliasee();
3126 if (auto CS = CallSite(V))
3127 if (Value *RV = CS.getReturnedArgOperand()) {
3132 // See if InstructionSimplify knows any relevant tricks.
3133 if (Instruction *I = dyn_cast<Instruction>(V))
3134 // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3135 if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
3142 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3147 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3148 const DataLayout &DL, LoopInfo *LI,
3149 unsigned MaxLookup) {
3150 SmallPtrSet<Value *, 4> Visited;
3151 SmallVector<Value *, 4> Worklist;
3152 Worklist.push_back(V);
3153 do {
3154 Value *P = Worklist.pop_back_val();
3155 P = GetUnderlyingObject(P, DL, MaxLookup);
3157 if (!Visited.insert(P).second)
3160 if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3161 Worklist.push_back(SI->getTrueValue());
3162 Worklist.push_back(SI->getFalseValue());
3166 if (PHINode *PN = dyn_cast<PHINode>(P)) {
3167 // If this PHI changes the underlying object in every iteration of the
3168 // loop, don't look through it. Consider:
3171 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
3175 // Prev is tracking Curr one iteration behind so they refer to different
3176 // underlying objects.
3177 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3178 isSameUnderlyingObjectInLoop(PN, LI))
3179 for (Value *IncValue : PN->incoming_values())
3180 Worklist.push_back(IncValue);
3184 Objects.push_back(P);
3185 } while (!Worklist.empty());
3188 /// Return true if the only users of this pointer are lifetime markers.
3189 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3190 for (const User *U : V->users()) {
3191 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3192 if (!II) return false;
3194 if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3195 II->getIntrinsicID() != Intrinsic::lifetime_end)
3201 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3202 const Instruction *CtxI,
3203 const DominatorTree *DT) {
3204 const Operator *Inst = dyn_cast<Operator>(V);
3205 if (!Inst)
3206 return false;
3208 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3209 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3210 if (C->canTrap())
3211 return false;
3213 switch (Inst->getOpcode()) {
3216 case Instruction::UDiv:
3217 case Instruction::URem: {
3218 // x / y is undefined if y == 0.
3220 if (match(Inst->getOperand(1), m_APInt(V)))
3224 case Instruction::SDiv:
3225 case Instruction::SRem: {
3226 // x / y is undefined if y == 0 or x == INT_MIN and y == -1
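    // E.g. for i8, -128 / -1 would be +128, which is not representable in
    // i8, so the division has undefined behavior.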
    const APInt *Numerator, *Denominator;
    if (!match(Inst->getOperand(1), m_APInt(Denominator)))
      return false;
    // We cannot hoist this division if the denominator is 0.
    if (*Denominator == 0)
      return false;
    // It's safe to hoist if the denominator is not 0 or -1.
    if (*Denominator != -1)
      return true;
    // At this point we know that the denominator is -1. It is safe to hoist as
    // long as we know that the numerator is not INT_MIN.
    if (match(Inst->getOperand(0), m_APInt(Numerator)))
      return !Numerator->isMinSignedValue();
    // The numerator *might* be MinSignedValue.
    return false;
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(Inst);
    if (!LI->isUnordered() ||
        // Speculative load may create a race that did not exist in the source.
        LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
        // Speculative load may load data from dirty regions.
        LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress))
      return false;
    const DataLayout &DL = LI->getModule()->getDataLayout();
    return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
                                              LI->getAlignment(), DL, CtxI, DT);
  }
  case Instruction::Call: {
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      switch (II->getIntrinsicID()) {
      // These synthetic intrinsics have no side-effects and just mark
      // information about their operands.
      // FIXME: There are other no-op synthetic instructions that potentially
      // should be considered at least *safe* to speculate...
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
        return true;

      case Intrinsic::bswap:
      case Intrinsic::ctlz:
      case Intrinsic::ctpop:
      case Intrinsic::cttz:
      case Intrinsic::objectsize:
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::smul_with_overflow:
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::umul_with_overflow:
      case Intrinsic::usub_with_overflow:
        return true;
      // These intrinsics are defined to have the same behavior as libm
      // functions except for setting errno.
      case Intrinsic::sqrt:
      case Intrinsic::fma:
      case Intrinsic::fmuladd:
        return true;
      // These intrinsics are defined to have the same behavior as libm
      // functions, and the corresponding libm functions never set errno.
      case Intrinsic::trunc:
      case Intrinsic::copysign:
      case Intrinsic::fabs:
      case Intrinsic::minnum:
      case Intrinsic::maxnum:
        return true;
      // These intrinsics are defined to have the same behavior as libm
      // functions, which never overflow when operating on the IEEE754 types
      // that we support, and never set errno otherwise.
      case Intrinsic::ceil:
      case Intrinsic::floor:
      case Intrinsic::nearbyint:
      case Intrinsic::rint:
      case Intrinsic::round:
        return true;
      // TODO: are convert_{from,to}_fp16 safe?
      // TODO: can we list target-specific intrinsics here?
      default:
        break;
      }
    }
    return false; // The called function could have undefined behavior or
                  // side-effects, even if marked readnone nounwind.
  }
  case Instruction::VAArg:
  case Instruction::Alloca:
  case Instruction::Invoke:
  case Instruction::PHI:
  case Instruction::Store:
  case Instruction::Ret:
  case Instruction::Br:
  case Instruction::IndirectBr:
  case Instruction::Switch:
  case Instruction::Unreachable:
  case Instruction::Fence:
  case Instruction::AtomicRMW:
  case Instruction::AtomicCmpXchg:
  case Instruction::LandingPad:
  case Instruction::Resume:
  case Instruction::CatchSwitch:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
  case Instruction::CleanupPad:
  case Instruction::CleanupRet:
    return false; // Misc instructions which have effects
  }
}
bool llvm::mayBeMemoryDependent(const Instruction &I) {
  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
}
/// Return true if we know that the specified value is never null.
bool llvm::isKnownNonNull(const Value *V) {
  assert(V->getType()->isPointerTy() && "V must be pointer type");

  // Alloca never returns null, malloc might.
  if (isa<AllocaInst>(V)) return true;

  // A byval, inalloca, or nonnull argument is never null.
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();

  // A global variable in address space 0 is non null unless extern weak
  // or an absolute symbol reference. Other address spaces may have null as a
  // valid address for a global, so we can't assume anything.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
    return !GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
           GV->getType()->getAddressSpace() == 0;

  // A Load tagged with nonnull metadata is never null.
  if (const LoadInst *LI = dyn_cast<LoadInst>(V))
    return LI->getMetadata(LLVMContext::MD_nonnull);

  if (auto CS = ImmutableCallSite(V))
    if (CS.isReturnNonNull())
      return true;

  return false;
}
static bool isKnownNonNullFromDominatingCondition(const Value *V,
                                                  const Instruction *CtxI,
                                                  const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() && "V must be pointer type");
  assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
  assert(CtxI && "Context instruction required for analysis");
  assert(DT && "Dominator tree required for analysis");
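
  // The pattern we look for, sketched in IR (illustrative, names invented):
  //   %cmp = icmp eq i8* %v, null
  //   br i1 %cmp, label %is_null, label %non_null
  // In %non_null and every block it dominates, %v is known non-null, so a
  // dominating edge into that successor proves non-nullness at CtxI.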
  unsigned NumUsesExplored = 0;
  for (auto *U : V->users()) {
    // Avoid massive lists
    if (NumUsesExplored >= DomConditionsMaxUses)
      break;
    NumUsesExplored++;

    // Consider only compare instructions uniquely controlling a branch
    CmpInst::Predicate Pred;
    if (!match(const_cast<User *>(U),
               m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
        (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
      continue;

    for (auto *CmpU : U->users()) {
      if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
        assert(BI->isConditional() && "uses a comparison!");

        BasicBlock *NonNullSuccessor =
            BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
        BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
        if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
          return true;
      } else if (Pred == ICmpInst::ICMP_NE &&
                 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
                 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
        return true;
      }
    }
  }

  return false;
}
bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
                            const DominatorTree *DT) {
  if (isa<ConstantPointerNull>(V) || isa<UndefValue>(V))
    return false;

  if (isKnownNonNull(V))
    return true;

  if (!CtxI || !DT)
    return false;

  return ::isKnownNonNullFromDominatingCondition(V, CtxI, DT);
}
OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
                                                   const Value *RHS,
                                                   const DataLayout &DL,
                                                   AssumptionCache *AC,
                                                   const Instruction *CxtI,
                                                   const DominatorTree *DT) {
  // Multiplying n * m significant bits yields a result of n + m significant
  // bits. If the total number of significant bits does not exceed the
  // result bit width (minus 1), there is no overflow.
  // This means if we have enough leading zero bits in the operands
  // we can guarantee that the result does not overflow.
  // Ref: "Hacker's Delight" by Henry Warren
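  //
  // E.g. for i8: if each operand has 4 known leading zero bits (i.e. each is
  // at most 15), then ZeroBits below is 8 >= BitWidth, and the product is at
  // most 15 * 15 = 225, which fits in 8 bits without overflow.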
  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
  APInt LHSKnownZero(BitWidth, 0);
  APInt LHSKnownOne(BitWidth, 0);
  APInt RHSKnownZero(BitWidth, 0);
  APInt RHSKnownOne(BitWidth, 0);
  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
                   DT);
  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
                   DT);
  // Note that underestimating the number of zero bits gives a more
  // conservative answer.
  unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
                      RHSKnownZero.countLeadingOnes();
  // First handle the easy case: if we have enough zero bits there's
  // definitely no overflow.
  if (ZeroBits >= BitWidth)
    return OverflowResult::NeverOverflows;

  // Get the largest possible values for each operand.
  APInt LHSMax = ~LHSKnownZero;
  APInt RHSMax = ~RHSKnownZero;

  // We know the multiply operation doesn't overflow if the maximum values for
  // each operand will not overflow after we multiply them together.
  bool MaxOverflow;
  LHSMax.umul_ov(RHSMax, MaxOverflow);
  if (!MaxOverflow)
    return OverflowResult::NeverOverflows;

  // We know it always overflows if multiplying the smallest possible values
  // for the operands also results in overflow.
  bool MinOverflow;
  LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
  if (MinOverflow)
    return OverflowResult::AlwaysOverflows;

  return OverflowResult::MayOverflow;
}
OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
                                                   const Value *RHS,
                                                   const DataLayout &DL,
                                                   AssumptionCache *AC,
                                                   const Instruction *CxtI,
                                                   const DominatorTree *DT) {
  bool LHSKnownNonNegative, LHSKnownNegative;
  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
                 AC, CxtI, DT);
  if (LHSKnownNonNegative || LHSKnownNegative) {
    bool RHSKnownNonNegative, RHSKnownNegative;
    ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
                   AC, CxtI, DT);

    if (LHSKnownNegative && RHSKnownNegative) {
      // The sign bit is set in both cases: this MUST overflow.
      return OverflowResult::AlwaysOverflows;
    }

    if (LHSKnownNonNegative && RHSKnownNonNegative) {
      // The sign bit is clear in both cases: this CANNOT overflow.
      return OverflowResult::NeverOverflows;
    }
  }

  return OverflowResult::MayOverflow;
}
static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                                  const Value *RHS,
                                                  const AddOperator *Add,
                                                  const DataLayout &DL,
                                                  AssumptionCache *AC,
                                                  const Instruction *CxtI,
                                                  const DominatorTree *DT) {
  if (Add && Add->hasNoSignedWrap()) {
    return OverflowResult::NeverOverflows;
  }

  bool LHSKnownNonNegative, LHSKnownNegative;
  bool RHSKnownNonNegative, RHSKnownNegative;
  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
                 AC, CxtI, DT);
  ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
                 AC, CxtI, DT);

  if ((LHSKnownNonNegative && RHSKnownNegative) ||
      (LHSKnownNegative && RHSKnownNonNegative)) {
    // The sign bits are opposite: this CANNOT overflow.
    return OverflowResult::NeverOverflows;
  }

  // The remaining code needs Add to be available. Early return if not.
  if (!Add)
    return OverflowResult::MayOverflow;

  // If the sign of Add is the same as at least one of the operands, this add
  // CANNOT overflow. This is particularly useful when the sum is
  // @llvm.assume'ed non-negative rather than proved so from analyzing its
  // operands.
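  //
  // E.g. after "call void @llvm.assume(i1 %cmp)" where %cmp establishes that
  // the sum is non-negative, ComputeSignBit can determine the sign bit of
  // Add even though the signs of its operands alone are inconclusive.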
  bool LHSOrRHSKnownNonNegative =
      (LHSKnownNonNegative || RHSKnownNonNegative);
  bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
    bool AddKnownNonNegative, AddKnownNegative;
    ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
                   /*Depth=*/0, AC, CxtI, DT);
    if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
        (AddKnownNegative && LHSOrRHSKnownNegative)) {
      return OverflowResult::NeverOverflows;
    }
  }

  return OverflowResult::MayOverflow;
}
bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
                                     const DominatorTree &DT) {
#ifndef NDEBUG
  auto IID = II->getIntrinsicID();
  assert((IID == Intrinsic::sadd_with_overflow ||
          IID == Intrinsic::uadd_with_overflow ||
          IID == Intrinsic::ssub_with_overflow ||
          IID == Intrinsic::usub_with_overflow ||
          IID == Intrinsic::smul_with_overflow ||
          IID == Intrinsic::umul_with_overflow) &&
         "Not an overflow intrinsic!");
#endif
  SmallVector<const BranchInst *, 2> GuardingBranches;
  SmallVector<const ExtractValueInst *, 2> Results;

  for (const User *U : II->users()) {
    if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");

      if (EVI->getIndices()[0] == 0)
        Results.push_back(EVI);
      else {
        assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");

        for (const auto *U : EVI->users())
          if (const auto *B = dyn_cast<BranchInst>(U)) {
            assert(B->isConditional() && "How else is it using an i1?");
            GuardingBranches.push_back(B);
          }
      }
    } else {
      // We are using the aggregate directly in a way we don't want to analyze
      // here (storing it to a global, say).
      return false;
    }
  }

  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
    BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
    if (!NoWrapEdge.isSingleEdge())
      return false;

    // Check if all users of the add are provably no-wrap.
    for (const auto *Result : Results) {
      // If the extractvalue itself is not executed on overflow, then we don't
      // need to check each use separately, since domination is transitive.
      if (DT.dominates(NoWrapEdge, Result->getParent()))
        continue;

      for (auto &RU : Result->uses())
        if (!DT.dominates(NoWrapEdge, RU))
          return false;
    }

    return true;
  };

  return any_of(GuardingBranches, AllUsesGuardedByBranch);
}
OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
                                       Add, DL, AC, CxtI, DT);
}

OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
}
bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
  // A memory operation returns normally if it isn't volatile. A volatile
  // operation is allowed to trap.
  //
  // An atomic operation isn't guaranteed to return in a reasonable amount of
  // time because it's possible for another thread to interfere with it for an
  // arbitrary length of time, but programs aren't allowed to rely on that.
  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
    return !LI->isVolatile();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I))
    return !SI->isVolatile();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
    return !CXI->isVolatile();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
    return !RMWI->isVolatile();
  if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
    return !MII->isVolatile();

  // If there is no successor, then execution can't transfer to it.
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
    return !CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
    return !CatchSwitch->unwindsToCaller();
  if (isa<ResumeInst>(I))
    return false;
  if (isa<ReturnInst>(I))
    return false;

  // Calls can throw, or contain an infinite loop, or kill the process.
  if (auto CS = ImmutableCallSite(I)) {
    // Call sites that throw have implicit non-local control flow.
    if (!CS.doesNotThrow())
      return false;

    // Non-throwing call sites can loop infinitely, call exit/pthread_exit
    // etc. and thus not return.  However, LLVM already assumes that
    //
    //  - Thread exiting actions are modeled as writes to memory invisible to
    //    the program.
    //
    //  - Loops that don't have side effects (side effects are volatile/atomic
    //    stores and IO) always terminate (see http://llvm.org/PR965).
    //    Furthermore IO itself is also modeled as writes to memory invisible
    //    to the program.
    //
    // We rely on those assumptions here, and use the memory effects of the
    // call target as a proxy for checking that it always returns.
    //
    // FIXME: This isn't aggressive enough; a call which only writes to a
    // global is guaranteed to return.
    return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
           match(I, m_Intrinsic<Intrinsic::assume>());
  }

  // Other instructions return normally.
  return true;
}
bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                                  const Loop *L) {
  // The loop header is guaranteed to be executed for every iteration.
  //
  // FIXME: Relax this constraint to cover all basic blocks that are
  // guaranteed to be executed at every iteration.
  if (I->getParent() != L->getHeader()) return false;

  for (const Instruction &LI : *L->getHeader()) {
    if (&LI == I) return true;
    if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
  }
  llvm_unreachable("Instruction not contained in its own parent basic block.");
}
bool llvm::propagatesFullPoison(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Xor:
  case Instruction::Trunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    // These operations all propagate poison unconditionally. Note that poison
    // is not any particular value, so xor or subtraction of poison with
    // itself still yields poison, not zero.
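    // E.g. "%y = xor i32 %x, %x" may not be simplified to 0 when %x is
    // poison; %y is poison as well.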
    return true;

  case Instruction::AShr:
  case Instruction::SExt:
    // For these operations, one bit of the input is replicated across
    // multiple output bits. A replicated poison bit is still poison.
    return true;

  case Instruction::Shl: {
    // Left shift *by* a poison value is poison. The number of
    // positions to shift is unsigned, so no negative values are
    // possible there. Left shift by zero places preserves poison. So
    // it only remains to consider left shift of poison by a positive
    // number of places.
    //
    // A left shift by a positive number of places leaves the lowest order bit
    // non-poisoned. However, if such a shift has a no-wrap flag, then we can
    // make the poison operand violate that flag, yielding a fresh full-poison
    // value.
    auto *OBO = cast<OverflowingBinaryOperator>(I);
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  }

  case Instruction::Mul: {
    // A multiplication by zero yields a non-poison zero result, so we need to
    // rule out zero as an operand. Conservatively, multiplication by a
    // non-zero constant is not multiplication by zero.
    //
    // Multiplication by a non-zero constant can leave some bits
    // non-poisoned. For example, a multiplication by 2 leaves the lowest
    // order bit unpoisoned. So we need to consider that.
    //
    // Multiplication by 1 preserves poison. If the multiplication has a
    // no-wrap flag, then we can make the poison operand violate that flag
    // when multiplied by any integer other than 0 and 1.
    auto *OBO = cast<OverflowingBinaryOperator>(I);
    if (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) {
      for (Value *V : OBO->operands()) {
        if (auto *CI = dyn_cast<ConstantInt>(V)) {
          // A ConstantInt cannot yield poison, so we can assume that it is
          // the other operand that is poison.
          return !CI->isZero();
        }
      }
    }
    return false;
  }

  case Instruction::ICmp:
    // Comparing poison with any value yields poison. This is why, for
    // instance, x s< (x +nsw 1) can be folded to true.
    return true;

  case Instruction::GetElementPtr:
    // A GEP implicitly represents a sequence of additions, subtractions,
    // truncations, sign extensions and multiplications. The multiplications
    // are by the non-zero sizes of some set of types, so we do not have to be
    // concerned with multiplication by zero. If the GEP is in-bounds, then
    // these operations are implicitly no-signed-wrap so poison is propagated
    // by the arguments above for Add, Sub, Trunc, SExt and Mul.
    return cast<GEPOperator>(I)->isInBounds();

  default:
    return false;
  }
}
const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Store:
    return cast<StoreInst>(I)->getPointerOperand();

  case Instruction::Load:
    return cast<LoadInst>(I)->getPointerOperand();

  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->getPointerOperand();

  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->getPointerOperand();

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    return I->getOperand(1);

  default:
    return nullptr;
  }
}
bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
  // We currently only look for uses of poison values within the same basic
  // block, as that makes it easier to guarantee that the uses will be
  // executed given that PoisonI is executed.
  //
  // FIXME: Expand this to consider uses beyond the same basic block. To do
  // this, look out for the distinction between post-dominance and strong
  // post-dominance.
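  //
  // E.g. (illustrative IR): given
  //   %idx = add nsw i32 %a, %b                               ; PoisonI
  //   %p   = getelementptr inbounds i32, i32* %base, i32 %idx
  //   store i32 0, i32* %p
  // the store would have undefined behavior if %idx were full poison, so
  // %idx can be assumed not to be full poison.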
  const BasicBlock *BB = PoisonI->getParent();

  // Set of instructions that we have proved will yield poison if PoisonI
  // does.
  SmallSet<const Value *, 16> YieldsPoison;
  SmallSet<const BasicBlock *, 4> Visited;
  YieldsPoison.insert(PoisonI);
  Visited.insert(PoisonI->getParent());

  BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();

  unsigned Iter = 0;
  while (Iter++ < MaxDepth) {
    for (auto &I : make_range(Begin, End)) {
      if (&I != PoisonI) {
        const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
        if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
          return true;
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          return false;
      }

      // Mark poison that propagates from I through uses of I.
      if (YieldsPoison.count(&I)) {
        for (const User *User : I.users()) {
          const Instruction *UserI = cast<Instruction>(User);
          if (propagatesFullPoison(UserI))
            YieldsPoison.insert(User);
        }
      }
    }

    if (auto *NextBB = BB->getSingleSuccessor()) {
      if (Visited.insert(NextBB).second) {
        BB = NextBB;
        Begin = BB->getFirstNonPHI()->getIterator();
        End = BB->end();
        continue;
      }
    }

    break;
  }
  return false;
}
static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
  if (FMF.noNaNs())
    return true;

  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isNaN();
  return false;
}

static bool isKnownNonZero(const Value *V) {
  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isZero();
  return false;
}
/// Match non-obvious integer minimum and maximum sequences.
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
                                       Value *CmpLHS, Value *CmpRHS,
                                       Value *TrueVal, Value *FalseVal,
                                       Value *&LHS, Value *&RHS) {
  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
  if (match(TrueVal, m_Zero()) &&
      match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) {
    LHS = TrueVal;
    RHS = FalseVal;
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
  }

  // Z = X -nsw Y
  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
  if (match(FalseVal, m_Zero()) &&
      match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) {
    LHS = TrueVal;
    RHS = FalseVal;
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
  }

  const APInt *C1;
  if (!match(CmpRHS, m_APInt(C1)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  // An unsigned min/max can be written with a signed compare.
  const APInt *C2;
  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
      (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
    // Is the sign bit set?
    // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
    // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
    if (Pred == CmpInst::ICMP_SLT && *C1 == 0 && C2->isMaxSignedValue()) {
      LHS = TrueVal;
      RHS = FalseVal;
      return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
    }

    // Is the sign bit clear?
    // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
    // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
    if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
        C2->isMinSignedValue()) {
      LHS = TrueVal;
      RHS = FalseVal;
      return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
    }
  }

  // Look through 'not' ops to find disguised signed min/max.
  // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
  // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
  if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
      match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2) {
    LHS = TrueVal;
    RHS = FalseVal;
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
  }

  // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
  // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
  if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
      match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2) {
    LHS = TrueVal;
    RHS = FalseVal;
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}
static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS) {
  LHS = CmpLHS;
  RHS = CmpRHS;

  // If the predicate is an "or-equal" (FP) predicate, then signed zeroes may
  // return inconsistent results between implementations.
  //   (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
  //   minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // Therefore we behave conservatively and only proceed if at least one of the
  // operands is known to not be zero, or if we don't care about signed zeroes.
  switch (Pred) {
  default: break;
  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
    if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
        !isKnownNonZero(CmpRHS))
      return {SPF_UNKNOWN, SPNB_NA, false};
  }

  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
  bool Ordered = false;

  // When given one NaN and one non-NaN input:
  //  - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
  //  - A simple C99 (a < b ? a : b) construction will return 'b' (as the
  //    ordered comparison fails), which could be NaN or non-NaN.
  // so here we discover exactly what NaN behavior is required/accepted.
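  // E.g. for "a ogt b ? a : b" where only 'b' may be NaN: the ordered
  // compare returns false on NaN, the select then returns 'b', and so the
  // pattern may return NaN (SPNB_RETURNS_NAN).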
  if (CmpInst::isFPPredicate(Pred)) {
    bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
    bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);

    if (LHSSafe && RHSSafe) {
      // Both operands are known non-NaN.
      NaNBehavior = SPNB_RETURNS_ANY;
    } else if (CmpInst::isOrdered(Pred)) {
      // An ordered comparison will return false when given a NaN, so it
      // returns the RHS.
      Ordered = true;
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
        NaNBehavior = SPNB_RETURNS_NAN;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_OTHER;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    } else {
      Ordered = false;
      // An unordered comparison will return true when given a NaN, so it
      // returns the LHS.
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
        NaNBehavior = SPNB_RETURNS_OTHER;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_NAN;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    }
  }

  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
    std::swap(CmpLHS, CmpRHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
    if (NaNBehavior == SPNB_RETURNS_NAN)
      NaNBehavior = SPNB_RETURNS_OTHER;
    else if (NaNBehavior == SPNB_RETURNS_OTHER)
      NaNBehavior = SPNB_RETURNS_NAN;
    Ordered = !Ordered;
  }

  // ([if]cmp X, Y) ? X : Y
  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
    switch (Pred) {
    default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
    case FCmpInst::FCMP_UGT:
    case FCmpInst::FCMP_UGE:
    case FCmpInst::FCMP_OGT:
    case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
    case FCmpInst::FCMP_ULT:
    case FCmpInst::FCMP_ULE:
    case FCmpInst::FCMP_OLT:
    case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
    }
  }

  const APInt *C1;
  if (match(CmpRHS, m_APInt(C1))) {
    if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
        (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {

      // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
      // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
      if (Pred == ICmpInst::ICMP_SGT && (*C1 == 0 || C1->isAllOnesValue())) {
        return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
      }

      // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
      // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
      if (Pred == ICmpInst::ICMP_SLT && (*C1 == 0 || *C1 == 1)) {
        return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
      }
    }
  }

  return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  CastInst *CI = dyn_cast<CastInst>(V1);
  Constant *C = dyn_cast<Constant>(V2);
  if (!CI)
    return nullptr;
  *CastOp = CI->getOpcode();

  if (auto *CI2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, we can look
    // through V1.
    if (CI2->getOpcode() == CI->getOpcode() &&
        CI2->getSrcTy() == CI->getSrcTy())
      return CI2->getOperand(0);
    return nullptr;
  } else if (!C) {
    return nullptr;
  }
  Constant *CastedTo = nullptr;

  if (isa<ZExtInst>(CI) && CmpI->isUnsigned())
    CastedTo = ConstantExpr::getTrunc(C, CI->getSrcTy());

  if (isa<SExtInst>(CI) && CmpI->isSigned())
    CastedTo = ConstantExpr::getTrunc(C, CI->getSrcTy(), true);

  if (isa<TruncInst>(CI))
    CastedTo = ConstantExpr::getIntegerCast(C, CI->getSrcTy(), CmpI->isSigned());

  if (isa<FPTruncInst>(CI))
    CastedTo = ConstantExpr::getFPExtend(C, CI->getSrcTy(), true);

  if (isa<FPExtInst>(CI))
    CastedTo = ConstantExpr::getFPTrunc(C, CI->getSrcTy(), true);

  if (isa<FPToUIInst>(CI))
    CastedTo = ConstantExpr::getUIToFP(C, CI->getSrcTy(), true);

  if (isa<FPToSIInst>(CI))
    CastedTo = ConstantExpr::getSIToFP(C, CI->getSrcTy(), true);

  if (isa<UIToFPInst>(CI))
    CastedTo = ConstantExpr::getFPToUI(C, CI->getSrcTy(), true);

  if (isa<SIToFPInst>(CI))
    CastedTo = ConstantExpr::getFPToSI(C, CI->getSrcTy(), true);

  if (!CastedTo)
    return nullptr;
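
  // E.g. (illustrative) with "%w = zext i8 %x to i32" compared against
  // i32 300: truncating 300 to i8 gives 44, and zero-extending 44 back
  // yields 44 != 300, so the constant cannot be moved across the cast.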
  Constant *CastedBack =
      ConstantExpr::getCast(CI->getOpcode(), CastedTo, C->getType(), true);
  // Make sure the cast doesn't lose any information.
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}
SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp) {
  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS);
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS);
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS);
}
/// Return true if "icmp Pred LHS RHS" is always true.
static bool isTruePredicate(CmpInst::Predicate Pred,
                            const Value *LHS, const Value *RHS,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C   if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C   for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
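      // E.g. if the low two bits of X are known zero, then (X | 3) sets only
      // known-zero bits and equals X +_{nuw} 3.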
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        unsigned BitWidth = CA->getBitWidth();
        APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
        computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT);

        if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB)
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}
/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true.  Otherwise, return None.
static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS,
                      const Value *BRHS, const DataLayout &DL,
                      unsigned Depth, AssumptionCache *AC,
                      const Instruction *CxtI, const DominatorTree *DT) {
  switch (Pred) {
  default:
    return None;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
                        DT) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
      return true;
    return None;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
                        DT) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
      return true;
    return None;
  }
}
/// Return true if the operands of the two compares match. IsSwappedOps is true
/// when the operands match, but are swapped.
static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
                          const Value *BLHS, const Value *BRHS,
                          bool &IsSwappedOps) {

  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
  return IsMatchingOps || IsSwappedOps;
}
/// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
/// true.  Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
/// BRHS" is false.  Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
                                                    const Value *ALHS,
                                                    const Value *ARHS,
                                                    CmpInst::Predicate BPred,
                                                    const Value *BLHS,
                                                    const Value *BRHS,
                                                    bool IsSwappedOps) {
  // Canonicalize the operands so they're matching.
  if (IsSwappedOps) {
    std::swap(BLHS, BRHS);
    BPred = ICmpInst::getSwappedPredicate(BPred);
  }
  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}
/// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
/// true.  Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
/// C2" is false.  Otherwise, return None if we can't infer anything.
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const Value *BLHS, const ConstantInt *C2) {
  assert(ALHS == BLHS && "LHS operands must match.");
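  //
  // E.g. if the dominating condition A is "x u< 10" (DomCR = [0, 10)) and B
  // is "x u< 20" (CR = [0, 20)), the difference DomCR \ CR is empty, so A
  // implies B is true.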
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;

  return None;
}
Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool InvertAPred,
                                        unsigned Depth, AssumptionCache *AC,
                                        const Instruction *CxtI,
                                        const DominatorTree *DT) {
  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for example.
  if (LHS->getType() != RHS->getType())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->getScalarType()->isIntegerTy(1));

  // LHS ==> RHS by definition
  if (!InvertAPred && LHS == RHS)
    return true;

  if (OpTy->isVectorTy())
    // TODO: extending the code below to handle vectors
    return None;
  assert(OpTy->isIntegerTy(1) && "implied by above");

  ICmpInst::Predicate APred, BPred;
  Value *ALHS, *ARHS;
  Value *BLHS, *BRHS;

  if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
      !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
    return None;

  if (InvertAPred)
    APred = CmpInst::getInversePredicate(APred);

  // Can we infer anything when the two compares have matching operands?
  bool IsSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands are
  // constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
            cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
                                 CxtI, DT);

  return None;
}