//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;

    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE) {
  return ::computeKnownBits(V, Depth,
                            Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}
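
// For example (an illustrative case, not from the original comments): for
// %a = and i32 %x, 15 and %b = and i32 %y, -16, the known-zero masks are
// 0xFFFFFFF0 and 0x0000000F, whose union is all ones, so %a and %b have no
// common bits set and "add %a, %b" can safely be rewritten as "or %a, %b".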

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}
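
// For example (illustrative): a call "%len = call i64 @strlen(i8* %s)" whose
// only user is "icmp eq i64 %len, 0" satisfies this predicate.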

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
                                         DT));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                            Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);
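  // For instance (an illustrative case): multiplying two i8 values with 5
  // and 4 known leading zeros gives max(5 + 4, 8) - 8 = 1 known leading
  // zero, since a product of values below 2^3 and 2^4 stays below 2^7.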

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  %mul = mul i8 12, 14
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //  (12/4) * (14/2) = 3 * 7 = 21 = 0b00010101
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  // =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
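  // As a concrete, illustrative instance of the computation below (not part
  // of the original proof): if %a has known low bits xxxx1100 and %b has
  // known low bits xxxxx110, then TrailBitsKnown0 = 4, TrailZero0 = 2,
  // TrailBitsKnown1 = 3, TrailZero1 = 1, so TrailZ = 3, SmallestOperand =
  // min(4 - 2, 3 - 1) = 2 and ResultBitsKnown = min(2 + 3, 8) = 5. The
  // product is congruent to 0b1100 * 0b110 = 0b1001000 modulo 2^5, so its
  // low five bits are known to be 01000.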
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}
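
// For example (illustrative): a single i8 range [8, 16) has unsigned min
// 0b00001000 and max 0b00001111. Their xor, 0b00000111, has five leading
// zeros, so the top five bits form a common prefix: bits 7-4 become known
// zero and bit 3 becomes known one.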

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                       return EphValues.count(U);
                     })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
         std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}
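
// For example (illustrative): an assume that appears earlier in the same
// block as its context instruction is always valid (the common case caught
// by the forward search above), while an assume that follows its context is
// valid only if every instruction in between is speculatable or assume-like.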

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                           m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    uint64_t C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      Known.Zero |= RHSKnown.Zero;
      Known.One |= RHSKnown.One;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C);
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C);
      Known.One |= RHSKnown.One;
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One |= RHSKnown.Zero;
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One |= RHSKnown.One << C;
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      Known.Zero |= RHSKnown.One << C;
      Known.One |= RHSKnown.Zero << C;
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
  }
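
  // For example (an illustrative case): given
  //   %cmp = icmp eq i8 %v, 24
  //   call void @llvm.assume(i1 %cmp)
  // the assume(v = a) pattern above lets computeKnownBits conclude that all
  // eight bits of %v equal the bits of the constant 24 at any context where
  // the assume is valid.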

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functors that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2,
    unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}
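
// For example (illustrative): for a "shl" by a constant 3, the KZF supplied
// by the Shl case below maps known-zero bits Z to (Z << 3) with the low
// three bits also set (they are definitely zero after the shift), and the
// KOF maps known-one bits O to (O << 3).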

static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
                                       m_Value(Y))) ||
         match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
                                       m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }
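
  // For example (illustrative): "urem i8 %x, 8" keeps only the low three
  // bits of %x, so bits 7..3 of the result become known zero; in the
  // non-constant case, a %x with two known leading zeros guarantees the
  // remainder also has at least two, since it never exceeds %x.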
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
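
  // For example (illustrative): for "getelementptr [4 x i32], [4 x i32]* %p,
  // i64 %i, i64 %j" with a 16-byte-aligned %p, every index contributes an
  // offset that is a multiple of 4 bytes, so the result keeps at least two
  // known trailing zero bits.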
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value references ourself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::cttz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleTZ = Known2.One.countTrailingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleTZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = Known2.countMaxPopulation();
        unsigned LowBits = Log2_32(BitsPossiblySet)+1;
        Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        break;
      }
    }
    break;
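
  // For example (illustrative): for cttz on an i32 whose bit 5 is known to
  // be one, PossibleTZ is at most 5, so the result fits in Log2_32(5)+1 = 3
  // low bits and bits 31..3 become known zero.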
  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if vector is sign
    // extended, shifted, etc).
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              Known, Known2, Depth, Q);
          break;
        }
      }
    }
  }
}

/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, Known, Depth, Q);
  return Known;
}

/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case
/// where V is a vector, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                      const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Known.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
          V->getType()->isPtrOrPtrVectorTy()) &&
         "Not integer or pointer type!");
  assert(Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth &&
         "V and Known should have same BitWidth");
  (void)BitWidth;
1555 if (match(V, m_APInt(C))) {
1556 // We know all of the bits for a scalar constant or a splat vector constant!
1558 Known.Zero = ~Known.One;
1561 // Null and aggregate-zero are all-zeros.
1562 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1566 // Handle a constant vector by taking the intersection of the known bits of
1568 if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1569 // We know that CDS must be a vector of integers. Take the intersection of
1571 Known.Zero.setAllBits(); Known.One.setAllBits();
1572 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1573 APInt Elt = CDS->getElementAsAPInt(i);
1580 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1581 // We know that CV must be a vector of integers. Take the intersection of
1583 Known.Zero.setAllBits(); Known.One.setAllBits();
1584 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1585 Constant *Element = CV->getAggregateElement(i);
1586 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1591 const APInt &Elt = ElementCI->getValue();
1598 // Start out not knowing anything.
1601 // We can't imply anything about undefs.
1602 if (isa<UndefValue>(V))
1605 // There's no point in looking through other users of ConstantData for
1606 // assumptions. Confirm that we've handled them all.
1607 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1609 // Limit search depth.
1610 // All recursive calls that increase depth must come after this.
1611 if (Depth == MaxDepth)
1614 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1615 // the bits of its aliasee.
1616 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1617 if (!GA->isInterposable())
1618 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1622 if (const Operator *I = dyn_cast<Operator>(V))
1623 computeKnownBitsFromOperator(I, Known, Depth, Q);
1625 // Aligned pointers have trailing zeros, so refine the Known.Zero set.
1626 if (V->getType()->isPointerTy()) {
1627 unsigned Align = V->getPointerAlignment(Q.DL);
1629 Known.Zero.setLowBits(countTrailingZeros(Align));
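// For example, a pointer with 8-byte alignment has countTrailingZeros(8) == 3
// low bits known zero.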
1632 // computeKnownBitsFromAssume strictly refines Known.
1633 // Therefore, we run it after computeKnownBitsFromOperator.
1635 // Check whether a nearby assume intrinsic can determine some known bits.
1636 computeKnownBitsFromAssume(V, Known, Depth, Q);
1638 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1641 /// Return true if the given value is known to have exactly one
1642 /// bit set when defined. For vectors return true if every element is known to
1643 /// be a power of two when defined. Supports values with integer or pointer
1644 /// types and vectors of integers.
1645 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1647 assert(Depth <= MaxDepth && "Limit Search Depth");
1649 if (const Constant *C = dyn_cast<Constant>(V)) {
1650 if (C->isNullValue())
1653 const APInt *ConstIntOrConstSplatInt;
1654 if (match(C, m_APInt(ConstIntOrConstSplatInt)))
1655 return ConstIntOrConstSplatInt->isPowerOf2();
1658 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1659 // it is shifted off the end then the result is undefined.
1660 if (match(V, m_Shl(m_One(), m_Value())))
1663 // (signmask) >>l X is clearly a power of two if the one is not shifted off
1664 // the bottom. If it is shifted off the bottom then the result is undefined.
1665 if (match(V, m_LShr(m_SignMask(), m_Value())))
1668 // The remaining tests are all recursive, so bail out if we hit the limit.
1669 if (Depth++ == MaxDepth)
1672 Value *X = nullptr, *Y = nullptr;
1673 // A shift left or a logical shift right of a power of two is a power of two or zero.
1675 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1676 match(V, m_LShr(m_Value(X), m_Value()))))
1677 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1679 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1680 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1682 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1683 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1684 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1686 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1687 // A power of two and'd with anything is a power of two or zero.
1688 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1689 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1691 // X & (-X) is always a power of two or zero.
1692 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1697 // Adding a power-of-two or zero to the same power-of-two or zero yields
1698 // either the original power-of-two, a larger power-of-two or zero.
1699 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1700 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1701 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1702 if (match(X, m_And(m_Specific(Y), m_Value())) ||
1703 match(X, m_And(m_Value(), m_Specific(Y))))
1704 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1706 if (match(Y, m_And(m_Specific(X), m_Value())) ||
1707 match(Y, m_And(m_Value(), m_Specific(X))))
1708 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1711 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1712 KnownBits LHSBits(BitWidth);
1713 computeKnownBits(X, LHSBits, Depth, Q);
1715 KnownBits RHSBits(BitWidth);
1716 computeKnownBits(Y, RHSBits, Depth, Q);
1717 // If i8 V is a power of two or zero:
1718 // ZeroBits: 1 1 1 0 1 1 1 1
1719 // ~ZeroBits: 0 0 0 1 0 0 0 0
1720 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1721 // If OrZero isn't set, we cannot give back a zero result.
1722 // Make sure either the LHS or RHS has a bit set.
1723 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1728 // An exact divide or right shift can only shift off zero bits, so the result
1729 // is a power of two only if the first operand is a power of two and not
1730 // copying a sign bit (sdiv int_min, 2).
1731 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1732 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1733 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1740 /// \brief Test whether a GEP's result is known to be non-null.
1742 /// Uses properties inherent in a GEP to try to determine whether it is known to be non-null.
1745 /// Currently this routine does not support vector GEPs.
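/// As a sketch of the logic below: in address space 0, an inbounds GEP whose
/// base pointer is itself known non-null can never produce null, and neither
/// can an inbounds GEP that adds a non-zero constant offset, e.g. a GEP to the
/// second field of a struct whose first field has non-zero size.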
1746 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1748 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1751 // FIXME: Support vector-GEPs.
1752 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1754 // If the base pointer is non-null, we cannot walk to a null address with an
1755 // inbounds GEP in address space zero.
1756 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1759 // Walk the GEP operands and see if any operand introduces a non-zero offset.
1760 // If so, then the GEP cannot produce a null pointer, as doing so would
1761 // inherently violate the inbounds contract within address space zero.
1762 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1763 GTI != GTE; ++GTI) {
1764 // Struct types are easy -- they must always be indexed by a constant.
1765 if (StructType *STy = GTI.getStructTypeOrNull()) {
1766 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1767 unsigned ElementIdx = OpC->getZExtValue();
1768 const StructLayout *SL = Q.DL.getStructLayout(STy);
1769 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1770 if (ElementOffset > 0)
1775 // If we have a zero-sized type, the index doesn't matter. Keep looping.
1776 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1779 // Fast path the constant operand case both for efficiency and so we don't
1780 // increment Depth when just zipping down an all-constant GEP.
1781 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1787 // We post-increment Depth here because while isKnownNonZero increments it
1788 // as well, when we pop back up that increment won't persist. We don't want
1789 // to recurse 10k times just because we have 10k GEP operands. We don't
1790 // bail completely out because we want to handle constant GEPs regardless of depth.
1792 if (Depth++ >= MaxDepth)
1795 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1802 static bool isKnownNonNullFromDominatingCondition(const Value *V,
1803 const Instruction *CtxI,
1804 const DominatorTree *DT) {
1805 assert(V->getType()->isPointerTy() && "V must be pointer type");
1806 assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
1811 unsigned NumUsesExplored = 0;
1812 for (auto *U : V->users()) {
1813 // Avoid massive lists
1814 if (NumUsesExplored >= DomConditionsMaxUses)
1818 // If the value is used as an argument to a call or invoke, then argument
1819 // attributes may provide an answer about null-ness.
1820 if (auto CS = ImmutableCallSite(U))
1821 if (auto *CalledFunc = CS.getCalledFunction())
1822 for (const Argument &Arg : CalledFunc->args())
1823 if (CS.getArgOperand(Arg.getArgNo()) == V &&
1824 Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1827 // Consider only compare instructions uniquely controlling a branch
1828 CmpInst::Predicate Pred;
1829 if (!match(const_cast<User *>(U),
1830 m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1831 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1834 for (auto *CmpU : U->users()) {
1835 if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
1836 assert(BI->isConditional() && "uses a comparison!");
1838 BasicBlock *NonNullSuccessor =
1839 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1840 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1841 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1843 } else if (Pred == ICmpInst::ICMP_NE &&
1844 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
1845 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
1854 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1855 /// ensure that the value it's attached to is never equal to Value? 'RangeType'
1856 /// is the type of the value described by the range.
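/// For example, a range annotation of '!{i32 1, i32 100}' describes the
/// half-open interval [1, 100) and therefore excludes the value 0.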
1857 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1858 const unsigned NumRanges = Ranges->getNumOperands() / 2;
1859 assert(NumRanges >= 1);
1860 for (unsigned i = 0; i < NumRanges; ++i) {
1861 ConstantInt *Lower =
1862 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1863 ConstantInt *Upper =
1864 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1865 ConstantRange Range(Lower->getValue(), Upper->getValue());
1866 if (Range.contains(Value))
1872 /// Return true if the given value is known to be non-zero when defined. For
1873 /// vectors, return true if every element is known to be non-zero when
1874 /// defined. For pointers, if the context instruction and dominator tree are
1875 /// specified, perform context-sensitive analysis and return true if the
1876 /// pointer couldn't possibly be null at the specified instruction.
1877 /// Supports values with integer or pointer type and vectors of integers.
1878 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1879 if (auto *C = dyn_cast<Constant>(V)) {
1880 if (C->isNullValue())
1882 if (isa<ConstantInt>(C))
1883 // Must be non-zero due to null test above.
1886 // For constant vectors, check that all elements are undefined or known
1887 // non-zero to determine that the whole vector is known non-zero.
1888 if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1889 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1890 Constant *Elt = C->getAggregateElement(i);
1891 if (!Elt || Elt->isNullValue())
1893 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1899 // A global variable in address space 0 is non-null unless extern weak
1900 // or an absolute symbol reference. Other address spaces may have null as a
1901 // valid address for a global, so we can't assume anything.
1902 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1903 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
1904 GV->getType()->getAddressSpace() == 0)
1910 if (auto *I = dyn_cast<Instruction>(V)) {
1911 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1912 // If the possible ranges don't contain zero, then the value is
1913 // definitely non-zero.
1914 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1915 const APInt ZeroValue(Ty->getBitWidth(), 0);
1916 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1922 // Check for pointer simplifications.
1923 if (V->getType()->isPointerTy()) {
1924 // Alloca never returns null, malloc might.
1925 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
1928 // A byval, inalloca, or nonnull argument is never null.
1929 if (const Argument *A = dyn_cast<Argument>(V))
1930 if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
1933 // A Load tagged with nonnull metadata is never null.
1934 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
1935 if (LI->getMetadata(LLVMContext::MD_nonnull))
1938 if (auto CS = ImmutableCallSite(V))
1939 if (CS.isReturnNonNull())
1943 // The remaining tests are all recursive, so bail out if we hit the limit.
1944 if (Depth++ >= MaxDepth)
1947 // Check for recursive pointer simplifications.
1948 if (V->getType()->isPointerTy()) {
1949 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
1952 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1953 if (isGEPKnownNonNull(GEP, Depth, Q))
1957 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1959 // X | Y != 0 if X != 0 or Y != 0.
1960 Value *X = nullptr, *Y = nullptr;
1961 if (match(V, m_Or(m_Value(X), m_Value(Y))))
1962 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1964 // ext X != 0 if X != 0.
1965 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1966 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1968 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
1969 // if the lowest bit is shifted off the end.
1970 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1971 // shl nuw can't remove any non-zero bits.
1972 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1973 if (BO->hasNoUnsignedWrap())
1974 return isKnownNonZero(X, Depth, Q);
1976 KnownBits Known(BitWidth);
1977 computeKnownBits(X, Known, Depth, Q);
1981 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
1982 // defined if the sign bit is shifted off the end.
1983 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1984 // shr exact can only shift out zero bits.
1985 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1987 return isKnownNonZero(X, Depth, Q);
1989 KnownBits Known = computeKnownBits(X, Depth, Q);
1990 if (Known.isNegative())
1993 // If the shifter operand is a constant, and all of the bits shifted
1994 // out are known to be zero, and X is known non-zero then at least one
1995 // non-zero bit must remain.
1996 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1997 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1998 // Is there a known one in the portion not shifted out?
1999 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2001 // Are all the bits to be shifted out known zero?
2002 if (Known.countMinTrailingZeros() >= ShiftVal)
2003 return isKnownNonZero(X, Depth, Q);
2006 // div exact can only produce a zero if the dividend is zero.
2007 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2008 return isKnownNonZero(X, Depth, Q);
2011 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2012 KnownBits XKnown = computeKnownBits(X, Depth, Q);
2013 KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2015 // If X and Y are both non-negative (as signed values) then their sum is not
2016 // zero unless both X and Y are zero.
2017 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2018 if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2021 // If X and Y are both negative (as signed values) then their sum is not
2022 // zero unless both X and Y equal INT_MIN.
2023 if (XKnown.isNegative() && YKnown.isNegative()) {
2024 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2025 // The sign bit of X is set. If some other bit is set then X is not equal to INT_MIN.
2027 if (XKnown.One.intersects(Mask))
2029 // The sign bit of Y is set. If some other bit is set then Y is not equal to INT_MIN.
2031 if (YKnown.One.intersects(Mask))
2035 // The sum of a non-negative number and a power of two is not zero.
2036 if (XKnown.isNonNegative() &&
2037 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2039 if (YKnown.isNonNegative() &&
2040 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2044 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2045 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2046 // If X and Y are non-zero then so is X * Y as long as the multiplication
2047 // does not overflow.
2048 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
2049 isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2052 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2053 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2054 if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2055 isKnownNonZero(SI->getFalseValue(), Depth, Q))
2059 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2060 // Try to detect a recurrence that monotonically increases from a
2061 // starting value, as these are common as induction variables.
2062 if (PN->getNumIncomingValues() == 2) {
2063 Value *Start = PN->getIncomingValue(0);
2064 Value *Induction = PN->getIncomingValue(1);
2065 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2066 std::swap(Start, Induction);
2067 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2068 if (!C->isZero() && !C->isNegative()) {
2070 if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2071 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2077 // Check if all incoming values are non-zero constants.
2078 bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2079 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2081 if (AllNonZeroConstants)
2085 KnownBits Known(BitWidth);
2086 computeKnownBits(V, Known, Depth, Q);
2087 return Known.One != 0;
2090 /// Return true if V2 == V1 + X, where X is known non-zero.
2091 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2092 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2093 if (!BO || BO->getOpcode() != Instruction::Add)
2095 Value *Op = nullptr;
2096 if (V2 == BO->getOperand(0))
2097 Op = BO->getOperand(1);
2098 else if (V2 == BO->getOperand(1))
2099 Op = BO->getOperand(0);
2102 return isKnownNonZero(Op, 0, Q);
2105 /// Return true if it is known that V1 != V2.
2106 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2109 if (V1->getType() != V2->getType())
2110 // We can't look through casts yet.
2112 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2115 if (V1->getType()->isIntOrIntVectorTy()) {
2116 // Are any known bits in V1 contradictory to known bits in V2? If V1
2117 // has a known zero where V2 has a known one, they must not be equal.
2118 KnownBits Known1 = computeKnownBits(V1, 0, Q);
2119 KnownBits Known2 = computeKnownBits(V2, 0, Q);
2121 if (Known1.Zero.intersects(Known2.One) ||
2122 Known2.Zero.intersects(Known1.One))
2128 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2129 /// simplify operations downstream. Mask is known to be zero for bits that V cannot have.
2132 /// This function is defined on values with integer type, values with pointer
2133 /// type, and vectors of integers. In the case
2134 /// where V is a vector, the mask, known zero, and known one values are the
2135 /// same width as the vector element, and the bit is set only if it is true
2136 /// for all of the elements in the vector.
2137 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2139 KnownBits Known(Mask.getBitWidth());
2140 computeKnownBits(V, Known, Depth, Q);
2141 return Mask.isSubsetOf(Known.Zero);
2144 /// For vector constants, loop over the elements and find the constant with the
2145 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2146 /// or if any element was not analyzed; otherwise, return the count for the
2147 /// element with the minimum number of sign bits.
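/// For example, for the constant <2 x i8> <i8 -4, i8 1>, the element -4
/// (0b11111100) has 6 sign bits and the element 1 has 7, so 6 is returned.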
2148 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2150 const auto *CV = dyn_cast<Constant>(V);
2151 if (!CV || !CV->getType()->isVectorTy())
2154 unsigned MinSignBits = TyBits;
2155 unsigned NumElts = CV->getType()->getVectorNumElements();
2156 for (unsigned i = 0; i != NumElts; ++i) {
2157 // If we find a non-ConstantInt, bail out.
2158 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2162 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2168 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2171 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2173 unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2174 assert(Result > 0 && "At least one sign bit needs to be present!");
2178 /// Return the number of times the sign bit of the register is replicated into
2179 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2180 /// (itself), but other cases can give us information. For example, immediately
2181 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2182 /// other, so we return 3. For vectors, return the number of sign bits for the
2183 /// vector element with the minimum number of known sign bits.
2184 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2186 assert(Depth <= MaxDepth && "Limit Search Depth");
2188 // We return the minimum number of sign bits that are guaranteed to be present
2189 // in V, so for undef we have to conservatively return 1. We don't have the
2190 // same behavior for poison though -- that's a FIXME today.
2192 unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
2194 unsigned FirstAnswer = 1;
2196 // Note that ConstantInt is handled by the general computeKnownBits case below.
2199 if (Depth == MaxDepth)
2200 return 1; // Limit search depth.
2202 const Operator *U = dyn_cast<Operator>(V);
2203 switch (Operator::getOpcode(V)) {
2205 case Instruction::SExt:
2206 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2207 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2209 case Instruction::SDiv: {
2210 const APInt *Denominator;
2211 // sdiv X, C -> adds log(C) sign bits.
2212 if (match(U->getOperand(1), m_APInt(Denominator))) {
2214 // Ignore non-positive denominator.
2215 if (!Denominator->isStrictlyPositive())
2218 // Calculate the incoming numerator bits.
2219 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2221 // Add floor(log(C)) bits to the numerator bits.
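// For example, 'sdiv i32 %x, 16' adds logBase2(16) == 4 sign bits on top of
// whatever is known about %x.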
2222 return std::min(TyBits, NumBits + Denominator->logBase2());
2227 case Instruction::SRem: {
2228 const APInt *Denominator;
2229 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2230 // positive constant. This lets us put a lower bound on the number of sign bits.
2232 if (match(U->getOperand(1), m_APInt(Denominator))) {
2234 // Ignore non-positive denominator.
2235 if (!Denominator->isStrictlyPositive())
2238 // Calculate the incoming numerator bits. SRem by a positive constant
2239 // can't lower the number of sign bits.
2241 ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2243 // Calculate the leading sign bit constraints by examining the
2244 // denominator. Given that the denominator is positive, there are two
2247 // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2248 // (1 << ceilLogBase2(C)).
2250 // 2. the numerator is negative. Then the result range is (-C,0] and
2251 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2253 // Thus a lower bound on the number of sign bits is `TyBits -
2254 // ceilLogBase2(C)`.
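//
// For example, 'srem i32 %x, 5' produces results in (-5, 5), guaranteeing at
// least 32 - ceilLogBase2(5) == 29 sign bits.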
2256 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2257 return std::max(NumrBits, ResBits);
2262 case Instruction::AShr: {
2263 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2264 // ashr X, C -> adds C sign bits. Vectors too.
2266 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2267 if (ShAmt->uge(TyBits))
2268 break; // Bad shift.
2269 unsigned ShAmtLimited = ShAmt->getZExtValue();
2270 Tmp += ShAmtLimited;
2271 if (Tmp > TyBits) Tmp = TyBits;
2275 case Instruction::Shl: {
2277 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2278 // shl destroys sign bits.
2279 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2280 if (ShAmt->uge(TyBits) || // Bad shift.
2281 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2282 Tmp2 = ShAmt->getZExtValue();
2287 case Instruction::And:
2288 case Instruction::Or:
2289 case Instruction::Xor: // NOT is handled here.
2290 // Logical binary ops preserve the number of sign bits at worst.
2291 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2293 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2294 FirstAnswer = std::min(Tmp, Tmp2);
2295 // We computed what we know about the sign bits as our first
2296 // answer. Now proceed to the generic code that uses
2297 // computeKnownBits, and pick whichever answer is better.
2301 case Instruction::Select:
2302 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2303 if (Tmp == 1) return 1; // Early out.
2304 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2305 return std::min(Tmp, Tmp2);
2307 case Instruction::Add:
2308 // Add can have at most one carry bit. Thus we know that the output
2309 // is, at worst, one more bit than the inputs.
2310 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2311 if (Tmp == 1) return 1; // Early out.
2313 // Special case decrementing a value (ADD X, -1):
2314 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2315 if (CRHS->isAllOnesValue()) {
2316 KnownBits Known(TyBits);
2317 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2319 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits set.
2321 if ((Known.Zero | 1).isAllOnesValue())
2324 // If we are subtracting one from a positive number, there is no carry
2325 // out of the result.
2326 if (Known.isNonNegative())
2330 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2331 if (Tmp2 == 1) return 1;
2332 return std::min(Tmp, Tmp2)-1;
2334 case Instruction::Sub:
2335 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2336 if (Tmp2 == 1) return 1;
2339 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2340 if (CLHS->isNullValue()) {
2341 KnownBits Known(TyBits);
2342 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2343 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits set.
2345 if ((Known.Zero | 1).isAllOnesValue())
2348 // If the input is known to be positive (the sign bit is known clear),
2349 // the output of the NEG has the same number of sign bits as the input.
2350 if (Known.isNonNegative())
2353 // Otherwise, we treat this like a SUB.
2356 // Sub can have at most one carry bit. Thus we know that the output
2357 // is, at worst, one more bit than the inputs.
2358 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2359 if (Tmp == 1) return 1; // Early out.
2360 return std::min(Tmp, Tmp2)-1;
2362 case Instruction::Mul: {
2363 // The output of the Mul can be at most twice the valid bits in the inputs.
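// For example, if both i32 operands have 25 known sign bits (8 valid bits
// each), the product needs at most 16 valid bits, leaving 32 - 16 + 1 == 17
// known sign bits.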
2364 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2365 if (SignBitsOp0 == 1) return 1; // Early out.
2366 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2367 if (SignBitsOp1 == 1) return 1;
2368 unsigned OutValidBits =
2369 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2370 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2373 case Instruction::PHI: {
2374 const PHINode *PN = cast<PHINode>(U);
2375 unsigned NumIncomingValues = PN->getNumIncomingValues();
2376 // Don't analyze large in-degree PHIs.
2377 if (NumIncomingValues > 4) break;
2378 // Unreachable blocks may have zero-operand PHI nodes.
2379 if (NumIncomingValues == 0) break;
2381 // Take the minimum of all incoming values. This can't infinitely loop
2382 // because of our depth threshold.
2383 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2384 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2385 if (Tmp == 1) return Tmp;
2387 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2392 case Instruction::Trunc:
2393 // FIXME: it's tricky to do anything useful for this, but it is an important
2394 // case for targets like X86.
2397 case Instruction::ExtractElement:
2398 // Look through extract element. At the moment we keep this simple and skip
2399 // tracking the specific element. But at least we might find information
2400 // valid for all elements of the vector (for example if vector is sign
2401 // extended, shifted, etc).
2402 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2405 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2406 // use this information.
2408 // If we can examine all elements of a vector constant successfully, we're
2409 // done (we can't do any better than that). If not, keep trying.
2410 if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2413 KnownBits Known(TyBits);
2414 computeKnownBits(V, Known, Depth, Q);
2416 // If we know that the sign bit is either zero or one, determine the number of
2417 // identical bits in the top of the input value.
2418 return std::max(FirstAnswer, Known.countMinSignBits());
2421 /// This function computes the integer multiple of Base that equals V.
2422 /// If successful, it returns true and stores the multiple in
2423 /// Multiple. If unsuccessful, it returns false. It looks
2424 /// through SExt instructions only if LookThroughSExt is true.
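/// For example, with Base == 8 and V == 'mul i32 %x, 8', this returns true
/// and sets Multiple to %x.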
2425 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2426 bool LookThroughSExt, unsigned Depth) {
2427 const unsigned MaxDepth = 6;
2429 assert(V && "No Value?");
2430 assert(Depth <= MaxDepth && "Limit Search Depth");
2431 assert(V->getType()->isIntegerTy() && "Not integer type!");
2433 Type *T = V->getType();
2435 ConstantInt *CI = dyn_cast<ConstantInt>(V);
2445 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2446 Constant *BaseVal = ConstantInt::get(T, Base);
2447 if (CO && CO == BaseVal) {
2449 Multiple = ConstantInt::get(T, 1);
2453 if (CI && CI->getZExtValue() % Base == 0) {
2454 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2458 if (Depth == MaxDepth) return false; // Limit search depth.
2460 Operator *I = dyn_cast<Operator>(V);
2461 if (!I) return false;
2463 switch (I->getOpcode()) {
2465 case Instruction::SExt:
2466 if (!LookThroughSExt) return false;
2467 // otherwise fall through to ZExt
2469 case Instruction::ZExt:
2470 return ComputeMultiple(I->getOperand(0), Base, Multiple,
2471 LookThroughSExt, Depth+1);
2472 case Instruction::Shl:
2473 case Instruction::Mul: {
2474 Value *Op0 = I->getOperand(0);
2475 Value *Op1 = I->getOperand(1);
2477 if (I->getOpcode() == Instruction::Shl) {
2478 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2479 if (!Op1CI) return false;
2480 // Turn Op0 << Op1 into Op0 * 2^Op1
2481 APInt Op1Int = Op1CI->getValue();
2482 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2483 APInt API(Op1Int.getBitWidth(), 0);
2484 API.setBit(BitToSet);
2485 Op1 = ConstantInt::get(V->getContext(), API);
2488 Value *Mul0 = nullptr;
2489 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2490 if (Constant *Op1C = dyn_cast<Constant>(Op1))
2491 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2492 if (Op1C->getType()->getPrimitiveSizeInBits() <
2493 MulC->getType()->getPrimitiveSizeInBits())
2494 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2495 if (Op1C->getType()->getPrimitiveSizeInBits() >
2496 MulC->getType()->getPrimitiveSizeInBits())
2497 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2499 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2500 Multiple = ConstantExpr::getMul(MulC, Op1C);
2504 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2505 if (Mul0CI->getValue() == 1) {
2506 // V == Base * Op1, so return Op1
2512 Value *Mul1 = nullptr;
2513 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2514 if (Constant *Op0C = dyn_cast<Constant>(Op0))
2515 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2516 if (Op0C->getType()->getPrimitiveSizeInBits() <
2517 MulC->getType()->getPrimitiveSizeInBits())
2518 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2519 if (Op0C->getType()->getPrimitiveSizeInBits() >
2520 MulC->getType()->getPrimitiveSizeInBits())
2521 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2523 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2524 Multiple = ConstantExpr::getMul(MulC, Op0C);
2528 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2529 if (Mul1CI->getValue() == 1) {
2530 // V == Base * Op0, so return Op0
2538 // We could not determine if V is a multiple of Base.
2542 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2543 const TargetLibraryInfo *TLI) {
2544 const Function *F = ICS.getCalledFunction();
2546 return Intrinsic::not_intrinsic;
2548 if (F->isIntrinsic())
2549 return F->getIntrinsicID();
2552 return Intrinsic::not_intrinsic;
2555 // We're going to make assumptions about the semantics of the functions, so
2556 // check that the target knows that the function is available in this
2557 // environment and does not have local linkage.
2558 if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2559 return Intrinsic::not_intrinsic;
2561 if (!ICS.onlyReadsMemory())
2562 return Intrinsic::not_intrinsic;
2564 // Otherwise check if we have a call to a function that can be turned into a
2565 // vector intrinsic.
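// For example, a call to floorf that is known to only read memory is mapped
// to Intrinsic::floor below.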
2572 return Intrinsic::sin;
2576 return Intrinsic::cos;
2580 return Intrinsic::exp;
2584 return Intrinsic::exp2;
2588 return Intrinsic::log;
2590 case LibFunc_log10f:
2591 case LibFunc_log10l:
2592 return Intrinsic::log10;
2596 return Intrinsic::log2;
2600 return Intrinsic::fabs;
2604 return Intrinsic::minnum;
2608 return Intrinsic::maxnum;
2609 case LibFunc_copysign:
2610 case LibFunc_copysignf:
2611 case LibFunc_copysignl:
2612 return Intrinsic::copysign;
2614 case LibFunc_floorf:
2615 case LibFunc_floorl:
2616 return Intrinsic::floor;
2620 return Intrinsic::ceil;
2622 case LibFunc_truncf:
2623 case LibFunc_truncl:
2624 return Intrinsic::trunc;
2628 return Intrinsic::rint;
2629 case LibFunc_nearbyint:
2630 case LibFunc_nearbyintf:
2631 case LibFunc_nearbyintl:
2632 return Intrinsic::nearbyint;
2634 case LibFunc_roundf:
2635 case LibFunc_roundl:
2636 return Intrinsic::round;
2640 return Intrinsic::pow;
2644 return Intrinsic::sqrt;
2647 return Intrinsic::not_intrinsic;
2650 /// Return true if we can prove that the specified FP value is never equal to -0.0.
2653 /// NOTE: this function will need to be revisited when we support non-default rounding modes.
2655 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2657 if (auto *CFP = dyn_cast<ConstantFP>(V))
2658 return !CFP->getValueAPF().isNegZero();
2660 // Limit search depth.
2661 if (Depth == MaxDepth)
2664 auto *Op = dyn_cast<Operator>(V);
2668 // Check if the nsz fast-math flag is set.
2669 if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2670 if (FPO->hasNoSignedZeros())
2673 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2674 if (match(Op, m_FAdd(m_Value(), m_Zero())))
2677 // sitofp and uitofp turn into +0.0 for zero.
2678 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2681 if (auto *Call = dyn_cast<CallInst>(Op)) {
2682 Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2686 // sqrt(-0.0) = -0.0, no other negative results are possible.
2687 case Intrinsic::sqrt:
2688 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2690 case Intrinsic::fabs:
2698 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2699 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
2700 /// bit despite comparing equal.
2701 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2702 const TargetLibraryInfo *TLI,
2705 // TODO: This function does not do the right thing when SignBitOnly is true
2706 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2707 // which flips the sign bits of NaNs. See
2708 // https://llvm.org/bugs/show_bug.cgi?id=31702.
2710 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2711 return !CFP->getValueAPF().isNegative() ||
2712 (!SignBitOnly && CFP->getValueAPF().isZero());
2715 if (Depth == MaxDepth)
2716 return false; // Limit search depth.
2718 const Operator *I = dyn_cast<Operator>(V);
2722 switch (I->getOpcode()) {
2725 // Unsigned integers are always nonnegative.
2726 case Instruction::UIToFP:
2728 case Instruction::FMul:
2729 // x*x is always non-negative or a NaN.
2730 if (I->getOperand(0) == I->getOperand(1) &&
2731 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2735 case Instruction::FAdd:
2736 case Instruction::FDiv:
2737 case Instruction::FRem:
2738 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2740 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2742 case Instruction::Select:
2743 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2745 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2747 case Instruction::FPExt:
2748 case Instruction::FPTrunc:
2749 // Widening/narrowing never change sign.
2750 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2752 case Instruction::Call:
2753 const auto *CI = cast<CallInst>(I);
2754 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2758 case Intrinsic::maxnum:
2759 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2761 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2763 case Intrinsic::minnum:
2764 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2766 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2768 case Intrinsic::exp:
2769 case Intrinsic::exp2:
2770 case Intrinsic::fabs:
2773 case Intrinsic::sqrt:
2774 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
2777 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
2778 CannotBeNegativeZero(CI->getOperand(0), TLI));
2780 case Intrinsic::powi:
2781 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
2782 // powi(x,n) is non-negative if n is even.
2783 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
2786 // TODO: This is not correct. Given that exp is an integer, here are the
2787 // ways that pow can return a negative value:
2789 // pow(x, exp) --> negative if exp is odd and x is negative.
2790 // pow(-0, exp) --> -inf if exp is negative odd.
2791 // pow(-0, exp) --> -0 if exp is positive odd.
2792 // pow(-inf, exp) --> -0 if exp is negative odd.
2793 // pow(-inf, exp) --> -inf if exp is positive odd.
2795 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
2796 // but we must return false if x == -0. Unfortunately we do not currently
2797 // have a way of expressing this constraint. See details in
2798 // https://llvm.org/bugs/show_bug.cgi?id=31702.
2799 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2802 case Intrinsic::fma:
2803 case Intrinsic::fmuladd:
2804 // x*x+y is non-negative if y is non-negative.
2805 return I->getOperand(0) == I->getOperand(1) &&
2806 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
2807 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2815 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2816 const TargetLibraryInfo *TLI) {
2817 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
2820 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
2821 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
2824 bool llvm::isKnownNeverNaN(const Value *V) {
2825 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
2827 // If we're told that NaNs won't happen, assume they won't.
2828 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
2829 if (FPMathOp->hasNoNaNs())
2832 // TODO: Handle instructions and potentially recurse like other 'isKnown'
2833 // functions. For example, the result of sitofp is never NaN.
2835 // Handle scalar constants.
2836 if (auto *CFP = dyn_cast<ConstantFP>(V))
2837 return !CFP->isNaN();
2839 // Bail out for constant expressions, but try to handle vector constants.
2840 if (!V->getType()->isVectorTy() || !isa<Constant>(V))
2843 // For vectors, verify that each element is not NaN.
2844 unsigned NumElts = V->getType()->getVectorNumElements();
2845 for (unsigned i = 0; i != NumElts; ++i) {
2846 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
2849 if (isa<UndefValue>(Elt))
2851 auto *CElt = dyn_cast<ConstantFP>(Elt);
2852 if (!CElt || CElt->isNaN())
2855 // All elements were confirmed not-NaN or undefined.
2859 /// If the specified value can be set by repeating the same byte in memory,
2860 /// return the i8 value that represents it. This is
2861 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2862 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
2863 /// byte store (e.g. i16 0x1234), return null.
2864 Value *llvm::isBytewiseValue(Value *V) {
2865 // All byte-wide stores are splatable, even of arbitrary variables.
2866 if (V->getType()->isIntegerTy(8)) return V;
2868 // Handle 'null' ConstantAggregateZero etc.
2869 if (Constant *C = dyn_cast<Constant>(V))
2870 if (C->isNullValue())
2871 return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2873 // Constant float and double values can be handled as integer values if the
2874 // corresponding integer value is "byteable". An important case is 0.0.
2875 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2876 if (CFP->getType()->isFloatTy())
2877 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2878 if (CFP->getType()->isDoubleTy())
2879 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2880 // Don't handle long double formats, which have strange constraints.
2883 // We can handle constant integers whose width is a multiple of 8 bits.
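// For example, i32 0x01010101 is a splat of the byte 0x01 and is handled,
// while i32 0x01020304 fails the isSplat(8) check below and yields null.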
2884 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2885 if (CI->getBitWidth() % 8 == 0) {
2886 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2888 if (!CI->getValue().isSplat(8))
2890 return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2894 // A ConstantDataArray/Vector is splatable if all its members are equal and also splatable.
2896 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2897 Value *Elt = CA->getElementAsConstant(0);
2898 Value *Val = isBytewiseValue(Elt);
2902 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2903 if (CA->getElementAsConstant(I) != Elt)
2909 // Conceptually, we could handle things like:
2910 // %a = zext i8 %X to i16
2911 // %b = shl i16 %a, 8
2912 // %c = or i16 %a, %b
2913 // but until there is an example that actually needs this, it doesn't seem
2914 // worth worrying about.
2918 // This is the recursive version of BuildSubAggregate. It takes a few different
2919 // arguments. Idxs is the index within the nested struct From that we are
2920 // looking at now (which is of type IndexedType). IdxSkip is the number of
2921 // indices from Idxs that should be left out when inserting into the resulting
2922 // struct. To is the result struct built so far; new insertvalue instructions are inserted before InsertBefore.
2924 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2925 SmallVectorImpl<unsigned> &Idxs,
2927 Instruction *InsertBefore) {
2928 StructType *STy = dyn_cast<StructType>(IndexedType);
2930 // Save the original To argument so we can modify it
2932 // General case, the type indexed by Idxs is a struct
2933 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2934 // Process each struct element recursively
2937 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2941 // Couldn't find any inserted value for this index? Cleanup
2942 while (PrevTo != OrigTo) {
2943 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2944 PrevTo = Del->getAggregateOperand();
2945 Del->eraseFromParent();
2947 // Stop processing elements
2951 // If we successfully found a value for each of our subaggregates
2955 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
2956 // the struct's elements had a value that was inserted directly. In the latter
2957 // case, perhaps we can't determine each of the subelements individually, but
2958 // we might be able to find the complete struct somewhere.
2960 // Find the value that is at that particular spot
2961 Value *V = FindInsertedValue(From, Idxs);
2966 // Insert the value in the new (sub) aggregate
2967 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2968 "tmp", InsertBefore);
2971 // This helper takes a nested struct and extracts a part of it (which is again a
2972 // struct) into a new value. For example, given the struct:
2973 // { a, { b, { c, d }, e } }
2974 // and the indices "1, 1" this returns { c, d }.
2977 // It does this by inserting an insertvalue for each element in the resulting
2978 // struct, as opposed to just inserting a single struct. This will only work if
2979 // each of the elements of the substruct is known (i.e., inserted into From by an
2980 // insertvalue instruction somewhere).
2982 // All inserted insertvalue instructions are inserted before InsertBefore
2983 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2984 Instruction *InsertBefore) {
2985 assert(InsertBefore && "Must have someplace to insert!");
2986 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2988 Value *To = UndefValue::get(IndexedType);
2989 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2990 unsigned IdxSkip = Idxs.size();
2992 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2995 /// Given an aggregate and a sequence of indices, see if
2996 /// the scalar value indexed is already around as a register, for example if it
2997 /// were inserted directly into the aggregate.
2999 /// If InsertBefore is not null, this function will duplicate (modified)
3000 /// insertvalues when a part of a nested struct is extracted.
3001 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3002 Instruction *InsertBefore) {
3003 // Nothing to index? Just return V then (this is useful at the end of our recursion).
3005 if (idx_range.empty())
3007 // We have indices, so V should have an indexable type.
3008 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3009 "Not looking at a struct or array?");
3010 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3011 "Invalid indices for type?");
3013 if (Constant *C = dyn_cast<Constant>(V)) {
3014 C = C->getAggregateElement(idx_range[0]);
3015 if (!C) return nullptr;
3016 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3019 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3020 // Loop the indices for the insertvalue instruction in parallel with the
3021 // requested indices
3022 const unsigned *req_idx = idx_range.begin();
3023 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3024 i != e; ++i, ++req_idx) {
3025 if (req_idx == idx_range.end()) {
3026 // We can't handle this without inserting insertvalues
3030 // The requested index identifies a part of a nested aggregate. Handle
3031 // this specially. For example,
3032 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3033 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3034 // %C = extractvalue {i32, { i32, i32 } } %B, 1
3035 // This can be changed into
3036 // %A = insertvalue {i32, i32 } undef, i32 10, 0
3037 // %C = insertvalue {i32, i32 } %A, i32 11, 1
3038 // which allows the unused 0,0 element from the nested struct to be
3040 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3044 // This insert value inserts something other than what we are looking for.
3045 // See if the (aggregate) value inserted into has the value we are
3046 // looking for, then.
3048 return FindInsertedValue(I->getAggregateOperand(), idx_range,
3051 // If we end up here, the indices of the insertvalue match with those
3052 // requested (though possibly only partially). Now we recursively look at
3053 // the inserted value, passing any remaining indices.
3054 return FindInsertedValue(I->getInsertedValueOperand(),
3055 makeArrayRef(req_idx, idx_range.end()),
3059 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3060 // If we're extracting a value from an aggregate that was extracted from
3061 // something else, we can extract from that something else directly instead.
3062 // However, we will need to chain I's indices with the requested indices.
3064 // Calculate the number of indices required
3065 unsigned size = I->getNumIndices() + idx_range.size();
3066 // Allocate some space to put the new indices in
3067 SmallVector<unsigned, 5> Idxs;
3069 // Add indices from the extract value instruction
3070 Idxs.append(I->idx_begin(), I->idx_end());
3072 // Add requested indices
3073 Idxs.append(idx_range.begin(), idx_range.end());
3075 assert(Idxs.size() == size
3076 && "Number of indices added not correct?");
3078 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3080 // Otherwise, we don't know (e.g., extracting from a function return value
3081 // or a load instruction).
3085 /// Analyze the specified pointer to see if it can be expressed as a base
3086 /// pointer plus a constant offset. Return the base and offset to the caller.
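/// For example, for 'getelementptr inbounds i8, i8* %p, i64 42' this returns
/// %p and sets Offset to 42.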
3087 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3088 const DataLayout &DL) {
3089 unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
3090 APInt ByteOffset(BitWidth, 0);
3092 // We walk up the defs but use a visited set to handle unreachable code. In
3093 // that case, we stop after accumulating the cycle once (not that it matters).
3095 SmallPtrSet<Value *, 16> Visited;
3096 while (Visited.insert(Ptr).second) {
3097 if (Ptr->getType()->isVectorTy())
3100 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3101 // If one of the values we have visited is an addrspacecast, then
3102 // the pointer type of this GEP may be different from the type
3103 // of the Ptr parameter which was passed to this function. This
3104 // means when we construct GEPOffset, we need to use the size
3105 // of GEP's pointer type rather than the size of the original Ptr parameter.
3107 APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
3108 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3111 ByteOffset += GEPOffset.getSExtValue();
3113 Ptr = GEP->getPointerOperand();
3114 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3115 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3116 Ptr = cast<Operator>(Ptr)->getOperand(0);
3117 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3118 if (GA->isInterposable())
3120 Ptr = GA->getAliasee();
3125 Offset = ByteOffset.getSExtValue();
3129 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3130 unsigned CharSize) {
3131 // Make sure the GEP has exactly three arguments.
3132 if (GEP->getNumOperands() != 3)
3135 // Make sure the index-ee is a pointer to array of \p CharSize integers.
3137 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3138 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3141 // Check to make sure that the first operand of the GEP is an integer and
3142 // has value 0 so that we are sure we're indexing into the initializer.
3143 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3144 if (!FirstIdx || !FirstIdx->isZero())
3150 bool llvm::getConstantDataArrayInfo(const Value *V,
3151 ConstantDataArraySlice &Slice,
3152 unsigned ElementSize, uint64_t Offset) {
3155 // Look through bitcast instructions and geps.
3156 V = V->stripPointerCasts();
3158 // If the value is a GEP instruction or constant expression, treat it as an offset.
3160 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3161 // The GEP operator should be based on a pointer to string constant, and is
3162 // indexing into the string constant.
3163 if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3166 // If the second index isn't a ConstantInt, then this is a variable index
3167 // into the array. If this occurs, we can't say anything meaningful about the string.
3169 uint64_t StartIdx = 0;
3170 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3171 StartIdx = CI->getZExtValue();
3174 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3178 // The GEP, whether a constant expression or an instruction, must reference a global
3179 // variable that is a constant and is initialized. The referenced constant
3180 // initializer is the array that we'll use for optimization.
3181 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3182 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3185 const ConstantDataArray *Array;
3187 if (GV->getInitializer()->isNullValue()) {
3188 Type *GVTy = GV->getValueType();
3189 if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3190 // A zeroinitializer for the array; there is no ConstantDataArray.
3193 const DataLayout &DL = GV->getParent()->getDataLayout();
3194 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3195 uint64_t Length = SizeInBytes / (ElementSize / 8);
3196 if (Length <= Offset)
3199 Slice.Array = nullptr;
3201 Slice.Length = Length - Offset;
3205 // This must be a ConstantDataArray.
3206 Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3209 ArrayTy = Array->getType();
3211 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3214 uint64_t NumElts = ArrayTy->getArrayNumElements();
3215 if (Offset > NumElts)
3218 Slice.Array = Array;
3219 Slice.Offset = Offset;
3220 Slice.Length = NumElts - Offset;
3224 /// This function computes the length of a null-terminated C string pointed to
3225 /// by V. If successful, it returns true and stores the string in Str.
3226 /// If unsuccessful, it returns false.
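/// For example, given a global initialized to "hello" (with a trailing nul)
/// and Offset == 1, Str is set to "ello"; with TrimAtNul set, anything past
/// the first nul is dropped.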
3227 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3228 uint64_t Offset, bool TrimAtNul) {
3229 ConstantDataArraySlice Slice;
3230 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3233 if (Slice.Array == nullptr) {
3238 if (Slice.Length == 1) {
3239 Str = StringRef("", 1);
3242 // We cannot instantiate a StringRef as we do not have an appropriate string constant.
3247 // Start out with the entire array in the StringRef.
3248 Str = Slice.Array->getAsString();
3249 // Skip over 'offset' bytes.
3250 Str = Str.substr(Slice.Offset);
3253 // Trim off the \0 and anything after it. If the array is not nul
3254 // terminated, we just return the whole string. The client may know
3255 // some other way that the string is length-bound.
3256 Str = Str.substr(0, Str.find('\0'));
3261 // These next two are very similar to the above, but also look through PHI nodes.
3263 // TODO: See if we can integrate these two together.
3265 /// If we can compute the length of the string pointed to by
3266 /// the specified pointer, return 'len+1'. If we can't, return 0.
3267 static uint64_t GetStringLengthH(const Value *V,
3268 SmallPtrSetImpl<const PHINode*> &PHIs,
3269 unsigned CharSize) {
3270 // Look through noop bitcast instructions.
3271 V = V->stripPointerCasts();
3273 // If this is a PHI node, there are two cases: either we have already seen it or we haven't.
3275 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3276 if (!PHIs.insert(PN).second)
3277 return ~0ULL; // already in the set.
3279 // If it was new, see if all the input strings are the same length.
3280 uint64_t LenSoFar = ~0ULL;
3281 for (Value *IncValue : PN->incoming_values()) {
3282 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3283 if (Len == 0) return 0; // Unknown length -> unknown.
3285 if (Len == ~0ULL) continue;
3287 if (Len != LenSoFar && LenSoFar != ~0ULL)
3288 return 0; // Disagree -> unknown.
3292 // Success, all agree.
3296 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3297 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3298 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3299 if (Len1 == 0) return 0;
3300 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3301 if (Len2 == 0) return 0;
3302 if (Len1 == ~0ULL) return Len2;
3303 if (Len2 == ~0ULL) return Len1;
3304 if (Len1 != Len2) return 0;
3308 // Otherwise, see if we can read the string.
3309 ConstantDataArraySlice Slice;
3310 if (!getConstantDataArrayInfo(V, Slice, CharSize))
3313 if (Slice.Array == nullptr)
3316 // Search for nul characters
3317 unsigned NullIndex = 0;
3318 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3319 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3323 return NullIndex + 1;
3326 /// If we can compute the length of the string pointed to by
3327 /// the specified pointer, return 'len+1'. If we can't, return 0.
3328 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3329 if (!V->getType()->isPointerTy()) return 0;
3331 SmallPtrSet<const PHINode*, 32> PHIs;
3332 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3333 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3334 // an empty string as a length.
3335 return Len == ~0ULL ? 1 : Len;
3338 /// \brief \p PN defines a loop-variant pointer to an object. Check if the
3339 /// previous iteration of the loop was referring to the same object as \p PN.
3340 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3341 const LoopInfo *LI) {
3342 // Find the loop-defined value.
3343 Loop *L = LI->getLoopFor(PN->getParent());
3344 if (PN->getNumIncomingValues() != 2)
3345 return true;
3347 // Find the value from previous iteration.
3348 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3349 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3350 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3351 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3352 return true;
3354 // If a new pointer is loaded in the loop, the pointer references a different
3355 // object in every iteration. E.g.:
3356 //    for (i)
3357 //       int *p = a[i];
3358 //       ...
3359 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3360 if (!L->isLoopInvariant(Load->getPointerOperand()))
3361 return false;
3362 return true;
3363 }
3365 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3366 unsigned MaxLookup) {
3367 if (!V->getType()->isPointerTy())
3368 return V;
3369 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3370 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3371 V = GEP->getPointerOperand();
3372 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3373 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3374 V = cast<Operator>(V)->getOperand(0);
3375 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3376 if (GA->isInterposable())
3377 return V;
3378 V = GA->getAliasee();
3379 } else if (isa<AllocaInst>(V)) {
3380 // An alloca can't be further simplified.
3381 return V;
3382 } else {
3383 if (auto CS = CallSite(V))
3384 if (Value *RV = CS.getReturnedArgOperand()) {
3385 V = RV;
3386 continue;
3387 }
3389 // See if InstructionSimplify knows any relevant tricks.
3390 if (Instruction *I = dyn_cast<Instruction>(V))
3391 // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3392 if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3393 V = Simplified;
3394 continue;
3395 }
3397 return V;
3398 }
3399 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3400 }
3401 return V;
3402 }
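// Editorial example: for IR such as
//   %a = alloca [16 x i8]
//   %p = getelementptr inbounds [16 x i8], [16 x i8]* %a, i64 0, i64 4
//   %c = addrspacecast i8* %p to i8 addrspace(1)*
// GetUnderlyingObject strips the cast and the GEP and returns the alloca %a.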
3404 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3405 const DataLayout &DL, LoopInfo *LI,
3406 unsigned MaxLookup) {
3407 SmallPtrSet<Value *, 4> Visited;
3408 SmallVector<Value *, 4> Worklist;
3409 Worklist.push_back(V);
3410 do {
3411 Value *P = Worklist.pop_back_val();
3412 P = GetUnderlyingObject(P, DL, MaxLookup);
3414 if (!Visited.insert(P).second)
3415 continue;
3417 if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3418 Worklist.push_back(SI->getTrueValue());
3419 Worklist.push_back(SI->getFalseValue());
3420 continue;
3421 }
3423 if (PHINode *PN = dyn_cast<PHINode>(P)) {
3424 // If this PHI changes the underlying object in every iteration of the
3425 // loop, don't look through it. Consider:
3426 //   int **A;
3427 //   for (i) {
3428 //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3429 //     Curr = A[i];
3430 //     *Prev, *Curr;
3431 //   }
3432 // Prev is tracking Curr one iteration behind so they refer to different
3433 // underlying objects.
3434 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3435 isSameUnderlyingObjectInLoop(PN, LI))
3436 for (Value *IncValue : PN->incoming_values())
3437 Worklist.push_back(IncValue);
3438 continue;
3439 }
3441 Objects.push_back(P);
3442 } while (!Worklist.empty());
3443 }
3445 /// This is the function that does the work of looking through basic
3446 /// ptrtoint+arithmetic+inttoptr sequences.
3447 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3448 do {
3449 if (const Operator *U = dyn_cast<Operator>(V)) {
3450 // If we find a ptrtoint, we can transfer control back to the
3451 // regular getUnderlyingObjectFromInt.
3452 if (U->getOpcode() == Instruction::PtrToInt)
3453 return U->getOperand(0);
3454 // If we find an add of a constant, a multiplied value, or a phi, it's
3455 // likely that the other operand will lead us to the base
3456 // object. We don't have to worry about the case where the
3457 // object address is somehow being computed by the multiply,
3458 // because our callers only care when the result is an
3459 // identifiable object.
3460 if (U->getOpcode() != Instruction::Add ||
3461 (!isa<ConstantInt>(U->getOperand(1)) &&
3462 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3463 !isa<PHINode>(U->getOperand(1))))
3464 return V;
3465 V = U->getOperand(0);
3466 } else {
3467 return V;
3468 }
3469 assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3470 } while (true);
3471 }
3473 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3474 /// ptrtoint+arithmetic+inttoptr sequences.
3475 /// It returns false if unidentified object is found in GetUnderlyingObjects.
3476 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3477 SmallVectorImpl<Value *> &Objects,
3478 const DataLayout &DL) {
3479 SmallPtrSet<const Value *, 16> Visited;
3480 SmallVector<const Value *, 4> Working(1, V);
3481 do {
3482 V = Working.pop_back_val();
3484 SmallVector<Value *, 4> Objs;
3485 GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
3487 for (Value *V : Objs) {
3488 if (!Visited.insert(V).second)
3489 continue;
3490 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3491 const Value *O =
3492 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3493 if (O->getType()->isPointerTy()) {
3494 Working.push_back(O);
3495 continue;
3496 }
3497 }
3498 // If GetUnderlyingObjects fails to find an identifiable object,
3499 // getUnderlyingObjectsForCodeGen also fails for safety.
3500 if (!isIdentifiedObject(V)) {
3501 Objects.clear();
3502 return false;
3503 }
3504 Objects.push_back(const_cast<Value *>(V));
3505 }
3506 } while (!Working.empty());
3507 return true;
3508 }
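// Editorial sketch: given
//   %i = ptrtoint i8* %obj to i64
//   %j = add i64 %i, 8
//   %p = inttoptr i64 %j to i8*
// the IntToPtr operand is handed to getUnderlyingObjectFromInt, which walks
// the add back to the ptrtoint and recovers %obj. If any value found this
// way is not an identified object, the whole query conservatively fails and
// Objects is cleared.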
3510 /// Return true if the only users of this pointer are lifetime markers.
3511 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3512 for (const User *U : V->users()) {
3513 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3514 if (!II) return false;
3516 if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3517 II->getIntrinsicID() != Intrinsic::lifetime_end)
3518 return false;
3519 }
3520 return true;
3521 }
3523 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3524 const Instruction *CtxI,
3525 const DominatorTree *DT) {
3526 const Operator *Inst = dyn_cast<Operator>(V);
3527 if (!Inst)
3528 return false;
3530 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3531 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3532 if (C->canTrap())
3533 return false;
3535 switch (Inst->getOpcode()) {
3536 default:
3537 return true;
3538 case Instruction::UDiv:
3539 case Instruction::URem: {
3540 // x / y is undefined if y == 0.
3541 const APInt *V;
3542 if (match(Inst->getOperand(1), m_APInt(V)))
3543 return *V != 0;
3544 return false;
3545 }
3546 case Instruction::SDiv:
3547 case Instruction::SRem: {
3548 // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3549 const APInt *Numerator, *Denominator;
3550 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3551 return false;
3552 // We cannot hoist this division if the denominator is 0.
3553 if (*Denominator == 0)
3554 return false;
3555 // It's safe to hoist if the denominator is not 0 or -1.
3556 if (*Denominator != -1)
3557 return true;
3558 // At this point we know that the denominator is -1. It is safe to hoist as
3559 // long as we know that the numerator is not INT_MIN.
3560 if (match(Inst->getOperand(0), m_APInt(Numerator)))
3561 return !Numerator->isMinSignedValue();
3562 // The numerator *might* be MinSignedValue.
3563 return false;
3564 }
3565 case Instruction::Load: {
3566 const LoadInst *LI = cast<LoadInst>(Inst);
3567 if (!LI->isUnordered() ||
3568 // Speculative load may create a race that did not exist in the source.
3569 LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3570 // Speculative load may load data from dirty regions.
3571 LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3572 LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3573 return false;
3574 const DataLayout &DL = LI->getModule()->getDataLayout();
3575 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3576 LI->getAlignment(), DL, CtxI, DT);
3577 }
3578 case Instruction::Call: {
3579 auto *CI = cast<const CallInst>(Inst);
3580 const Function *Callee = CI->getCalledFunction();
3582 // The called function could have undefined behavior or side-effects, even
3583 // if marked readnone nounwind.
3584 return Callee && Callee->isSpeculatable();
3585 }
3586 case Instruction::VAArg:
3587 case Instruction::Alloca:
3588 case Instruction::Invoke:
3589 case Instruction::PHI:
3590 case Instruction::Store:
3591 case Instruction::Ret:
3592 case Instruction::Br:
3593 case Instruction::IndirectBr:
3594 case Instruction::Switch:
3595 case Instruction::Unreachable:
3596 case Instruction::Fence:
3597 case Instruction::AtomicRMW:
3598 case Instruction::AtomicCmpXchg:
3599 case Instruction::LandingPad:
3600 case Instruction::Resume:
3601 case Instruction::CatchSwitch:
3602 case Instruction::CatchPad:
3603 case Instruction::CatchRet:
3604 case Instruction::CleanupPad:
3605 case Instruction::CleanupRet:
3606 return false; // Misc instructions which have effects
3607 }
3608 }
3610 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3611 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3612 }
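// Editorial examples: 'udiv i32 %x, 7' is safe to speculate (the denominator
// is a non-zero constant) while 'udiv i32 %x, %y' is not (%y may be zero);
// an unordered, non-volatile load is safe only when its pointer is known
// dereferenceable and suitably aligned at the speculation point.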
3614 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
3615 const Value *RHS,
3616 const DataLayout &DL,
3617 AssumptionCache *AC,
3618 const Instruction *CxtI,
3619 const DominatorTree *DT) {
3620 // Multiplying n * m significant bits yields a result of n + m significant
3621 // bits. If the total number of significant bits does not exceed the
3622 // result bit width (minus 1), there is no overflow.
3623 // This means if we have enough leading zero bits in the operands
3624 // we can guarantee that the result does not overflow.
3625 // Ref: "Hacker's Delight" by Henry Warren
3626 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3627 KnownBits LHSKnown(BitWidth);
3628 KnownBits RHSKnown(BitWidth);
3629 computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3630 computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3631 // Note that underestimating the number of zero bits gives a more
3632 // conservative answer.
3633 unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
3634 RHSKnown.countMinLeadingZeros();
3635 // First handle the easy case: if we have enough zero bits there's
3636 // definitely no overflow.
3637 if (ZeroBits >= BitWidth)
3638 return OverflowResult::NeverOverflows;
3640 // Get the largest possible values for each operand.
3641 APInt LHSMax = ~LHSKnown.Zero;
3642 APInt RHSMax = ~RHSKnown.Zero;
3644 // We know the multiply operation doesn't overflow if the maximum values for
3645 // each operand will not overflow after we multiply them together.
3646 bool MaxOverflow;
3647 (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3648 if (!MaxOverflow)
3649 return OverflowResult::NeverOverflows;
3651 // We know it always overflows if multiplying the smallest possible values for
3652 // the operands also results in overflow.
3653 bool MinOverflow;
3654 (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
3655 if (MinOverflow)
3656 return OverflowResult::AlwaysOverflows;
3658 return OverflowResult::MayOverflow;
3659 }
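// Editorial worked example on i8: if both operands are known to have their
// four leading bits zero (values <= 15), ZeroBits == 4 + 4 >= 8, and the
// product is at most 15 * 15 == 225 < 256, so NeverOverflows is returned
// before the max-value refinement is even consulted.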
3661 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3662 const Value *RHS,
3663 const DataLayout &DL,
3664 AssumptionCache *AC,
3665 const Instruction *CxtI,
3666 const DominatorTree *DT) {
3667 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3668 if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3669 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3671 if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
3672 // The sign bit is set in both cases: this MUST overflow.
3674 return OverflowResult::AlwaysOverflows;
3675 }
3677 if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
3678 // The sign bit is clear in both cases: this CANNOT overflow.
3680 return OverflowResult::NeverOverflows;
3681 }
3682 }
3684 return OverflowResult::MayOverflow;
3685 }
3687 /// \brief Return true if we can prove that adding the two values of the
3688 /// knownbits will not overflow.
3689 /// Otherwise return false.
3690 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3691 const KnownBits &RHSKnown) {
3692 // Addition of two 2's complement numbers having opposite signs will never
3693 // overflow.
3694 if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3695 (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3696 return true;
3698 // If either of the values is known to be non-negative, adding them can only
3699 // overflow if the second is also non-negative, so we can assume that.
3700 // Two non-negative numbers will only overflow if there is a carry to the
3701 // sign bit, so we can check if even when the values are as big as possible
3702 // there is no overflow to the sign bit.
3703 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3704 APInt MaxLHS = ~LHSKnown.Zero;
3705 MaxLHS.clearSignBit();
3706 APInt MaxRHS = ~RHSKnown.Zero;
3707 MaxRHS.clearSignBit();
3708 APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3709 return Result.isSignBitClear();
3710 }
3712 // If either of the values is known to be negative, adding them can only
3713 // overflow if the second is also negative, so we can assume that.
3714 // Two negative numbers will only overflow if there is no carry to the sign
3715 // bit, so we can check if even when the values are as small as possible
3716 // there is overflow to the sign bit.
3717 if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3718 APInt MinLHS = LHSKnown.One;
3719 MinLHS.clearSignBit();
3720 APInt MinRHS = RHSKnown.One;
3721 MinRHS.clearSignBit();
3722 APInt Result = std::move(MinLHS) + std::move(MinRHS);
3723 return Result.isSignBitSet();
3724 }
3726 // If we reached here it means that we know nothing about the sign bits.
3727 // In this case we can't know if there will be an overflow, since by
3728 // changing the sign bits any two values can be made to overflow.
3729 return false;
3730 }
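// Editorial worked example on i8: if both addends are known non-negative and
// their maximum values (sign bit cleared) are 0x3F, then 0x3F + 0x3F == 0x7E
// still has a clear sign bit, so no carry reaches the sign bit and the
// addition provably cannot overflow.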
3732 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3733 const Value *RHS,
3734 const AddOperator *Add,
3735 const DataLayout &DL,
3736 AssumptionCache *AC,
3737 const Instruction *CxtI,
3738 const DominatorTree *DT) {
3739 if (Add && Add->hasNoSignedWrap()) {
3740 return OverflowResult::NeverOverflows;
3741 }
3743 // If LHS and RHS each have at least two sign bits, the addition will look
3744 // like
3745 //
3746 //   XX..... +
3747 //   YY.....
3748 //
3749 // If the carry into the most significant position is 0, X and Y can't both
3750 // be 1 and therefore the carry out of the addition is also 0.
3752 // If the carry into the most significant position is 1, X and Y can't both
3753 // be 0 and therefore the carry out of the addition is also 1.
3755 // Since the carry into the most significant position is always equal to
3756 // the carry out of the addition, there is no signed overflow.
3757 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3758 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3759 return OverflowResult::NeverOverflows;
3761 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3762 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3764 if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3765 return OverflowResult::NeverOverflows;
3767 // The remaining code needs Add to be available. Return early if it is not.
3768 if (!Add)
3769 return OverflowResult::MayOverflow;
3771 // If the sign of Add is the same as at least one of the operands, this add
3772 // CANNOT overflow. This is particularly useful when the sum is
3773 // @llvm.assume'ed non-negative rather than proved so from analyzing its
3774 // operands.
3775 bool LHSOrRHSKnownNonNegative =
3776 (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
3777 bool LHSOrRHSKnownNegative =
3778 (LHSKnown.isNegative() || RHSKnown.isNegative());
3779 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3780 KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
3781 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
3782 (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
3783 return OverflowResult::NeverOverflows;
3784 }
3785 }
3787 return OverflowResult::MayOverflow;
3788 }
3790 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3791 const DominatorTree &DT) {
3792 #ifndef NDEBUG
3793 auto IID = II->getIntrinsicID();
3794 assert((IID == Intrinsic::sadd_with_overflow ||
3795 IID == Intrinsic::uadd_with_overflow ||
3796 IID == Intrinsic::ssub_with_overflow ||
3797 IID == Intrinsic::usub_with_overflow ||
3798 IID == Intrinsic::smul_with_overflow ||
3799 IID == Intrinsic::umul_with_overflow) &&
3800 "Not an overflow intrinsic!");
3803 SmallVector<const BranchInst *, 2> GuardingBranches;
3804 SmallVector<const ExtractValueInst *, 2> Results;
3806 for (const User *U : II->users()) {
3807 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3808 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3810 if (EVI->getIndices()[0] == 0)
3811 Results.push_back(EVI);
3812 else {
3813 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3815 for (const auto *U : EVI->users())
3816 if (const auto *B = dyn_cast<BranchInst>(U)) {
3817 assert(B->isConditional() && "How else is it using an i1?");
3818 GuardingBranches.push_back(B);
3819 }
3820 }
3821 } else {
3822 // We are using the aggregate directly in a way we don't want to analyze
3823 // here (storing it to a global, say).
3824 return false;
3825 }
3826 }
3828 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3829 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3830 if (!NoWrapEdge.isSingleEdge())
3831 return false;
3833 // Check if all users of the add are provably no-wrap.
3834 for (const auto *Result : Results) {
3835 // If the extractvalue itself is not executed on overflow, then we don't
3836 // need to check each use separately, since domination is transitive.
3837 if (DT.dominates(NoWrapEdge, Result->getParent()))
3838 continue;
3840 for (auto &RU : Result->uses())
3841 if (!DT.dominates(NoWrapEdge, RU))
3842 return false;
3843 }
3845 return true;
3846 };
3848 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
3849 }
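// Editorial IR sketch of the guarded shape this recognizes:
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %sum = extractvalue { i32, i1 } %res, 0
//   %ovf = extractvalue { i32, i1 } %res, 1
//   br i1 %ovf, label %trap, label %ok
// If every use of %sum is dominated by the edge into %ok, the addition
// provably did not wrap wherever %sum is observed.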
3852 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
3853 const DataLayout &DL,
3854 AssumptionCache *AC,
3855 const Instruction *CxtI,
3856 const DominatorTree *DT) {
3857 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3858 Add, DL, AC, CxtI, DT);
3859 }
3861 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
3862 const Value *RHS,
3863 const DataLayout &DL,
3864 AssumptionCache *AC,
3865 const Instruction *CxtI,
3866 const DominatorTree *DT) {
3867 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3868 }
3870 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3871 // A memory operation returns normally if it isn't volatile. A volatile
3872 // operation is allowed to trap.
3874 // An atomic operation isn't guaranteed to return in a reasonable amount of
3875 // time because it's possible for another thread to interfere with it for an
3876 // arbitrary length of time, but programs aren't allowed to rely on that.
3877 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
3878 return !LI->isVolatile();
3879 if (const StoreInst *SI = dyn_cast<StoreInst>(I))
3880 return !SI->isVolatile();
3881 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
3882 return !CXI->isVolatile();
3883 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
3884 return !RMWI->isVolatile();
3885 if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
3886 return !MII->isVolatile();
3888 // If there is no successor, then execution can't transfer to it.
3889 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
3890 return !CRI->unwindsToCaller();
3891 if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
3892 return !CatchSwitch->unwindsToCaller();
3893 if (isa<ResumeInst>(I))
3894 return false;
3895 if (isa<ReturnInst>(I))
3896 return false;
3897 if (isa<UnreachableInst>(I))
3898 return false;
3900 // Calls can throw, or contain an infinite loop, or kill the process.
3901 if (auto CS = ImmutableCallSite(I)) {
3902 // Call sites that throw have implicit non-local control flow.
3903 if (!CS.doesNotThrow())
3904 return false;
3906 // Non-throwing call sites can loop infinitely, call exit/pthread_exit
3907 // etc. and thus not return. However, LLVM already assumes that
3909 // - Thread exiting actions are modeled as writes to memory invisible to
3910 //   the program.
3912 // - Loops that don't have side effects (side effects are volatile/atomic
3913 // stores and IO) always terminate (see http://llvm.org/PR965).
3914 // Furthermore IO itself is also modeled as writes to memory invisible to
3915 //   the program.
3917 // We rely on those assumptions here, and use the memory effects of the call
3918 // target as a proxy for checking that it always returns.
3920 // FIXME: This isn't aggressive enough; a call which only writes to a global
3921 // is guaranteed to return.
3922 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
3923 match(I, m_Intrinsic<Intrinsic::assume>()) ||
3924 match(I, m_Intrinsic<Intrinsic::sideeffect>());
3925 }
3927 // Other instructions return normally.
3928 return true;
3929 }
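// Editorial examples: a plain 'load i32, i32* %p' transfers execution to its
// successor, a volatile load may trap and does not, and a call is only known
// to return when it cannot throw and its memory effects (e.g. readonly or
// argmemonly) rule out the modeled ways of not returning.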
3931 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3932 const Loop *L) {
3933 // The loop header is guaranteed to be executed for every iteration.
3935 // FIXME: Relax this constraint to cover all basic blocks that are
3936 // guaranteed to be executed at every iteration.
3937 if (I->getParent() != L->getHeader()) return false;
3939 for (const Instruction &LI : *L->getHeader()) {
3940 if (&LI == I) return true;
3941 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3942 }
3943 llvm_unreachable("Instruction not contained in its own parent basic block.");
3944 }
3946 bool llvm::propagatesFullPoison(const Instruction *I) {
3947 switch (I->getOpcode()) {
3948 case Instruction::Add:
3949 case Instruction::Sub:
3950 case Instruction::Xor:
3951 case Instruction::Trunc:
3952 case Instruction::BitCast:
3953 case Instruction::AddrSpaceCast:
3954 case Instruction::Mul:
3955 case Instruction::Shl:
3956 case Instruction::GetElementPtr:
3957 // These operations all propagate poison unconditionally. Note that poison
3958 // is not any particular value, so xor or subtraction of poison with
3959 // itself still yields poison, not zero.
3960 return true;
3962 case Instruction::AShr:
3963 case Instruction::SExt:
3964 // For these operations, one bit of the input is replicated across
3965 // multiple output bits. A replicated poison bit is still poison.
3966 return true;
3968 case Instruction::ICmp:
3969 // Comparing poison with any value yields poison. This is why, for
3970 // instance, x s< (x +nsw 1) can be folded to true.
3971 return true;
3973 default:
3974 return false;
3975 }
3976 }
3978 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3979 switch (I->getOpcode()) {
3980 case Instruction::Store:
3981 return cast<StoreInst>(I)->getPointerOperand();
3983 case Instruction::Load:
3984 return cast<LoadInst>(I)->getPointerOperand();
3986 case Instruction::AtomicCmpXchg:
3987 return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3989 case Instruction::AtomicRMW:
3990 return cast<AtomicRMWInst>(I)->getPointerOperand();
3992 case Instruction::UDiv:
3993 case Instruction::SDiv:
3994 case Instruction::URem:
3995 case Instruction::SRem:
3996 return I->getOperand(1);
3998 default:
3999 return nullptr;
4000 }
4001 }
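// Editorial example: for 'udiv i32 %x, %d' the function returns operand 1,
// so if %d is full poison the program has undefined behavior once the udiv
// executes; programUndefinedIfFullPoison below builds on exactly this fact.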
4003 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4004 // We currently only look for uses of poison values within the same basic
4005 // block, as that makes it easier to guarantee that the uses will be
4006 // executed given that PoisonI is executed.
4008 // FIXME: Expand this to consider uses beyond the same basic block. To do
4009 // this, look out for the distinction between post-dominance and strong
4010 // post-dominance.
4011 const BasicBlock *BB = PoisonI->getParent();
4013 // Set of instructions that we have proved will yield poison if PoisonI
4014 // does.
4015 SmallSet<const Value *, 16> YieldsPoison;
4016 SmallSet<const BasicBlock *, 4> Visited;
4017 YieldsPoison.insert(PoisonI);
4018 Visited.insert(PoisonI->getParent());
4020 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4022 unsigned Iter = 0;
4023 while (Iter++ < MaxDepth) {
4024 for (auto &I : make_range(Begin, End)) {
4025 if (&I != PoisonI) {
4026 const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
4027 if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
4028 return true;
4029 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4030 return false;
4031 }
4033 // Mark poison that propagates from I through uses of I.
4034 if (YieldsPoison.count(&I)) {
4035 for (const User *User : I.users()) {
4036 const Instruction *UserI = cast<Instruction>(User);
4037 if (propagatesFullPoison(UserI))
4038 YieldsPoison.insert(User);
4039 }
4040 }
4041 }
4043 if (auto *NextBB = BB->getSingleSuccessor()) {
4044 if (Visited.insert(NextBB).second) {
4045 BB = NextBB;
4046 Begin = BB->getFirstNonPHI()->getIterator();
4047 End = BB->end();
4048 continue;
4049 }
4050 }
4052 break;
4053 }
4054 return false;
4055 }
4057 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4058 if (FMF.noNaNs())
4059 return true;
4061 if (auto *C = dyn_cast<ConstantFP>(V))
4062 return !C->isNaN();
4063 return false;
4064 }
4066 static bool isKnownNonZero(const Value *V) {
4067 if (auto *C = dyn_cast<ConstantFP>(V))
4068 return !C->isZero();
4069 return false;
4070 }
4072 /// Match clamp pattern for float types without care about NaNs or signed zeros.
4073 /// Given non-min/max outer cmp/select from the clamp pattern this
4074 /// function recognizes if it can be substituted by a "canonical" min/max
4075 /// operation.
4076 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4077 Value *CmpLHS, Value *CmpRHS,
4078 Value *TrueVal, Value *FalseVal,
4079 Value *&LHS, Value *&RHS) {
4080 // Try to match
4081 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4082 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4083 // and return description of the outer Max/Min.
4085 // First, check if select has inverse order:
4086 if (CmpRHS == FalseVal) {
4087 std::swap(TrueVal, FalseVal);
4088 Pred = CmpInst::getInversePredicate(Pred);
4089 }
4091 // Assume success now. If there's no match, callers should not use these anyway.
4092 LHS = TrueVal;
4093 RHS = FalseVal;
4095 const APFloat *FC1;
4096 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4097 return {SPF_UNKNOWN, SPNB_NA, false};
4099 const APFloat *FC2;
4100 switch (Pred) {
4101 case CmpInst::FCMP_OLT:
4102 case CmpInst::FCMP_OLE:
4103 case CmpInst::FCMP_ULT:
4104 case CmpInst::FCMP_ULE:
4105 if (match(FalseVal,
4106 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4107 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4108 FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4109 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4110 break;
4111 case CmpInst::FCMP_OGT:
4112 case CmpInst::FCMP_OGE:
4113 case CmpInst::FCMP_UGT:
4114 case CmpInst::FCMP_UGE:
4115 if (match(FalseVal,
4116 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4117 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4118 FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4119 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4120 break;
4121 default:
4122 break;
4123 }
4125 return {SPF_UNKNOWN, SPNB_NA, false};
4126 }
4128 /// Recognize variations of:
4129 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4130 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4131 Value *CmpLHS, Value *CmpRHS,
4132 Value *TrueVal, Value *FalseVal) {
4133 // Swap the select operands and predicate to match the patterns below.
4134 if (CmpRHS != TrueVal) {
4135 Pred = ICmpInst::getSwappedPredicate(Pred);
4136 std::swap(TrueVal, FalseVal);
4137 }
4138 const APInt *C1;
4139 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4140 const APInt *C2;
4141 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4142 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4143 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4144 return {SPF_SMAX, SPNB_NA, false};
4146 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4147 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4148 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4149 return {SPF_SMIN, SPNB_NA, false};
4151 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4152 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4153 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4154 return {SPF_UMAX, SPNB_NA, false};
4156 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4157 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4158 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4159 return {SPF_UMIN, SPNB_NA, false};
4160 }
4161 return {SPF_UNKNOWN, SPNB_NA, false};
4162 }
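// Editorial example of the first pattern above:
//   %min = select (icmp slt i32 %x, 100), i32 %x, i32 100  ; SMIN(%x, 100)
//   %r   = select (icmp slt i32 %x, 10), i32 10, i32 %min
// matchClamp reports SPF_SMAX, i.e. %r == SMAX(SMIN(%x, 100), 10), the
// canonical form of clamping %x to [10, 100].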
4164 /// Recognize variations of:
4165 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4166 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4167 Value *CmpLHS, Value *CmpRHS,
4168 Value *TVal, Value *FVal,
4169 unsigned Depth) {
4170 // TODO: Allow FP min/max with nnan/nsz.
4171 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4173 Value *A, *B;
4174 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
4175 if (!SelectPatternResult::isMinOrMax(L.Flavor))
4176 return {SPF_UNKNOWN, SPNB_NA, false};
4178 Value *C, *D;
4179 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
4180 if (L.Flavor != R.Flavor)
4181 return {SPF_UNKNOWN, SPNB_NA, false};
4183 // Match the compare to the min/max operations of the select operands.
4184 switch (L.Flavor) {
4185 case SPF_SMIN:
4186 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4187 Pred = ICmpInst::getSwappedPredicate(Pred);
4188 std::swap(CmpLHS, CmpRHS);
4190 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4191 break;
4192 return {SPF_UNKNOWN, SPNB_NA, false};
4193 case SPF_SMAX:
4194 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4195 Pred = ICmpInst::getSwappedPredicate(Pred);
4196 std::swap(CmpLHS, CmpRHS);
4198 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4199 break;
4200 return {SPF_UNKNOWN, SPNB_NA, false};
4201 case SPF_UMIN:
4202 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4203 Pred = ICmpInst::getSwappedPredicate(Pred);
4204 std::swap(CmpLHS, CmpRHS);
4206 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4207 break;
4208 return {SPF_UNKNOWN, SPNB_NA, false};
4209 case SPF_UMAX:
4210 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4211 Pred = ICmpInst::getSwappedPredicate(Pred);
4212 std::swap(CmpLHS, CmpRHS);
4214 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4215 break;
4216 return {SPF_UNKNOWN, SPNB_NA, false};
4217 default:
4218 return {SPF_UNKNOWN, SPNB_NA, false};
4219 }
4221 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4222 if (CmpLHS == A && CmpRHS == C && D == B)
4223 return {L.Flavor, SPNB_NA, false};
4225 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4226 if (CmpLHS == A && CmpRHS == D && C == B)
4227 return {L.Flavor, SPNB_NA, false};
4229 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4230 if (CmpLHS == B && CmpRHS == C && D == A)
4231 return {L.Flavor, SPNB_NA, false};
4233 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4234 if (CmpLHS == B && CmpRHS == D && C == A)
4235 return {L.Flavor, SPNB_NA, false};
4237 return {SPF_UNKNOWN, SPNB_NA, false};
4238 }
4240 /// Match non-obvious integer minimum and maximum sequences.
4241 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4242 Value *CmpLHS, Value *CmpRHS,
4243 Value *TrueVal, Value *FalseVal,
4244 Value *&LHS, Value *&RHS,
4245 unsigned Depth) {
4246 // Assume success. If there's no match, callers should not use these anyway.
4247 LHS = CmpLHS;
4248 RHS = CmpRHS;
4250 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4251 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4252 return SPR;
4254 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
4255 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4256 return SPR;
4258 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4259 return {SPF_UNKNOWN, SPNB_NA, false};
4261 // Z = X -nsw Y
4262 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4263 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4264 if (match(TrueVal, m_Zero()) &&
4265 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4266 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4268 // Z = X -nsw Y
4269 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4270 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4271 if (match(FalseVal, m_Zero()) &&
4272 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4273 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4275 const APInt *C1;
4276 if (!match(CmpRHS, m_APInt(C1)))
4277 return {SPF_UNKNOWN, SPNB_NA, false};
4279 // An unsigned min/max can be written with a signed compare.
4280 const APInt *C2;
4281 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4282 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4283 // Is the sign bit set?
4284 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4285 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4286 if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4287 C2->isMaxSignedValue())
4288 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4290 // Is the sign bit clear?
4291 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4292 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4293 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4294 C2->isMinSignedValue())
4295 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4296 }
4298 // Look through 'not' ops to find disguised signed min/max.
4299 // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4300 // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4301 if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4302 match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4303 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4305 // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4306 // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4307 if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4308 match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4309 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4311 return {SPF_UNKNOWN, SPNB_NA, false};
4312 }
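// Editorial example of a disguised signed min:
//   %na = xor i32 %a, -1                                   ; ~%a
//   %r  = select (icmp sgt i32 %a, 42), i32 %na, i32 -43   ; -43 == ~42
// The 'not' pattern above fires and %r is reported as SMIN(~%a, ~42).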
4314 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4315 FastMathFlags FMF,
4316 Value *CmpLHS, Value *CmpRHS,
4317 Value *TrueVal, Value *FalseVal,
4318 Value *&LHS, Value *&RHS,
4319 unsigned Depth) {
4320 LHS = CmpLHS;
4321 RHS = CmpRHS;
4323 // Signed zero may return inconsistent results between implementations.
4324 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4325 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4326 // Therefore, we behave conservatively and only proceed if at least one of the
4327 // operands is known to not be zero or if we don't care about signed zero.
4328 switch (Pred) {
4329 default: break;
4330 // FIXME: Include OGT/OLT/UGT/ULT.
4331 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4332 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4333 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4334 !isKnownNonZero(CmpRHS))
4335 return {SPF_UNKNOWN, SPNB_NA, false};
4336 }
4338 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4339 bool Ordered = false;
4341 // When given one NaN and one non-NaN input:
4342 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4343 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4344 // ordered comparison fails), which could be NaN or non-NaN.
4345 // so here we discover exactly what NaN behavior is required/accepted.
4346 if (CmpInst::isFPPredicate(Pred)) {
4347 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4348 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4350 if (LHSSafe && RHSSafe) {
4351 // Both operands are known non-NaN.
4352 NaNBehavior = SPNB_RETURNS_ANY;
4353 } else if (CmpInst::isOrdered(Pred)) {
4354 // An ordered comparison will return false when given a NaN, so it
4355 // returns the RHS.
4356 Ordered = true;
4357 if (LHSSafe)
4358 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4359 NaNBehavior = SPNB_RETURNS_NAN;
4360 else if (RHSSafe)
4361 NaNBehavior = SPNB_RETURNS_OTHER;
4362 else
4363 // Completely unsafe.
4364 return {SPF_UNKNOWN, SPNB_NA, false};
4365 } else {
4366 Ordered = false;
4367 // An unordered comparison will return true when given a NaN, so it
4368 // returns the LHS.
4369 if (LHSSafe)
4370 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4371 NaNBehavior = SPNB_RETURNS_OTHER;
4372 else if (RHSSafe)
4373 NaNBehavior = SPNB_RETURNS_NAN;
4374 else
4375 // Completely unsafe.
4376 return {SPF_UNKNOWN, SPNB_NA, false};
4377 }
4378 }
4380 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4381 std::swap(CmpLHS, CmpRHS);
4382 Pred = CmpInst::getSwappedPredicate(Pred);
4383 if (NaNBehavior == SPNB_RETURNS_NAN)
4384 NaNBehavior = SPNB_RETURNS_OTHER;
4385 else if (NaNBehavior == SPNB_RETURNS_OTHER)
4386 NaNBehavior = SPNB_RETURNS_NAN;
4387 std::swap(TrueVal, FalseVal);
4388 }
4390 // ([if]cmp X, Y) ? X : Y
4391 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4392 switch (Pred) {
4393 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4394 case ICmpInst::ICMP_UGT:
4395 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4396 case ICmpInst::ICMP_SGT:
4397 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4398 case ICmpInst::ICMP_ULT:
4399 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4400 case ICmpInst::ICMP_SLT:
4401 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4402 case FCmpInst::FCMP_UGT:
4403 case FCmpInst::FCMP_UGE:
4404 case FCmpInst::FCMP_OGT:
4405 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4406 case FCmpInst::FCMP_ULT:
4407 case FCmpInst::FCMP_ULE:
4408 case FCmpInst::FCMP_OLT:
4409 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4410 }
4411 }
4413 const APInt *C1;
4414 if (match(CmpRHS, m_APInt(C1))) {
4415 if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
4416 (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
4418 // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
4419 // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
4420 if (Pred == ICmpInst::ICMP_SGT &&
4421 (C1->isNullValue() || C1->isAllOnesValue())) {
4422 return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4423 }
4425 // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
4426 // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
4427 if (Pred == ICmpInst::ICMP_SLT &&
4428 (C1->isNullValue() || C1->isOneValue())) {
4429 return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4430 }
4431 }
4432 }
4434 if (CmpInst::isIntPredicate(Pred))
4435 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
4437 // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
4438 // may return either -0.0 or 0.0, so fcmp/select pair has stricter
4439 // semantics than minNum. Be conservative in such case.
4440 if (NaNBehavior != SPNB_RETURNS_ANY ||
4441 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4442 !isKnownNonZero(CmpRHS)))
4443 return {SPF_UNKNOWN, SPNB_NA, false};
4445 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4446 }
4448 /// Helps to match a select pattern in case of a type mismatch.
4450 /// The function processes the case when type of true and false values of a
4451 /// select instruction differs from type of the cmp instruction operands because
4452 /// of a cast instruction. The function checks if it is legal to move the cast
4453 /// operation after "select". If yes, it returns the new second value of
4454 /// "select" (with the assumption that cast is moved):
4455 /// 1. As operand of cast instruction when both values of "select" are same cast
4456 /// instructions.
4457 /// 2. As restored constant (by applying reverse cast operation) when the first
4458 /// value of the "select" is a cast operation and the second value is a
4460 /// NOTE: We return only the new second value because the first value could be
4461 /// accessed as operand of cast instruction.
4462 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4463 Instruction::CastOps *CastOp) {
4464 auto *Cast1 = dyn_cast<CastInst>(V1);
4465 if (!Cast1)
4466 return nullptr;
4468 *CastOp = Cast1->getOpcode();
4469 Type *SrcTy = Cast1->getSrcTy();
4470 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4471 // If V1 and V2 are both the same cast from the same type, look through V1.
4472 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4473 return Cast2->getOperand(0);
4474 return nullptr;
4475 }
4477 auto *C = dyn_cast<Constant>(V2);
4478 if (!C)
4479 return nullptr;
4481 Constant *CastedTo = nullptr;
4482 switch (*CastOp) {
4483 case Instruction::ZExt:
4484 if (CmpI->isUnsigned())
4485 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4486 break;
4487 case Instruction::SExt:
4488 if (CmpI->isSigned())
4489 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4490 break;
4491 case Instruction::Trunc:
4492 Constant *CmpConst;
4493 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
4494 CmpConst->getType() == SrcTy) {
4495 // Here we have the following case:
4497 // %cond = cmp iN %x, CmpConst
4498 // %tr = trunc iN %x to iK
4499 // %narrowsel = select i1 %cond, iK %tr, iK C
4501 // We can always move trunc after select operation:
4503 // %cond = cmp iN %x, CmpConst
4504 // %widesel = select i1 %cond, iN %x, iN CmpConst
4505 // %tr = trunc iN %widesel to iK
4507 // Note that C could be extended in any way because we don't care about
4508 // upper bits after truncation. It can't be abs pattern, because it would
4509 // look like:
4510 //
4511 // select i1 %cond, x, -x.
4513 // So only min/max pattern could be matched. Such match requires widened C
4514 // == CmpConst. That is why set widened C = CmpConst, condition trunc
4515 // CmpConst == C is checked below.
4516 CastedTo = CmpConst;
4517 } else {
4518 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
4519 }
4520 break;
4521 case Instruction::FPTrunc:
4522 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
4523 break;
4524 case Instruction::FPExt:
4525 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
4526 break;
4527 case Instruction::FPToUI:
4528 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
4529 break;
4530 case Instruction::FPToSI:
4531 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
4532 break;
4533 case Instruction::UIToFP:
4534 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
4535 break;
4536 case Instruction::SIToFP:
4537 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
4538 break;
4539 default:
4540 break;
4541 }
4543 if (!CastedTo)
4544 return nullptr;
4546 // Make sure the cast doesn't lose any information.
4547 Constant *CastedBack =
4548 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
4549 if (CastedBack != C)
4550 return nullptr;
4552 return CastedTo;
4553 }
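// Editorial example for the Trunc case: in
//   %cond = icmp sgt i32 %x, 100
//   %tr   = trunc i32 %x to i8
//   %sel  = select i1 %cond, i8 %tr, i8 100
// the trunc can be sunk below the select:
//   %wide = select i1 %cond, i32 %x, i32 100
//   %sel  = trunc i32 %wide to i8
// so the widened constant 100 (== CmpConst) is returned and the caller can
// match %wide as SMAX(%x, 100).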
4555 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
4556 Instruction::CastOps *CastOp,
4557 unsigned Depth) {
4558 if (Depth >= MaxDepth)
4559 return {SPF_UNKNOWN, SPNB_NA, false};
4561 SelectInst *SI = dyn_cast<SelectInst>(V);
4562 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
4564 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
4565 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
4567 CmpInst::Predicate Pred = CmpI->getPredicate();
4568 Value *CmpLHS = CmpI->getOperand(0);
4569 Value *CmpRHS = CmpI->getOperand(1);
4570 Value *TrueVal = SI->getTrueValue();
4571 Value *FalseVal = SI->getFalseValue();
4572 FastMathFlags FMF;
4573 if (isa<FPMathOperator>(CmpI))
4574 FMF = CmpI->getFastMathFlags();
4576 // Bail out early.
4577 if (CmpI->isEquality())
4578 return {SPF_UNKNOWN, SPNB_NA, false};
4580 // Deal with type mismatches.
4581 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
4582 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
4583 // If this is a potential fmin/fmax with a cast to integer, then ignore
4584 // -0.0 because there is no corresponding integer value.
4585 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4586 FMF.setNoSignedZeros();
4587 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4588 cast<CastInst>(TrueVal)->getOperand(0), C,
4589 LHS, RHS, Depth);
4590 }
4591 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
4592 // If this is a potential fmin/fmax with a cast to integer, then ignore
4593 // -0.0 because there is no corresponding integer value.
4594 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4595 FMF.setNoSignedZeros();
4596 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4597 C, cast<CastInst>(FalseVal)->getOperand(0),
4598 LHS, RHS, Depth);
4599 }
4600 }
4601 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
4602 LHS, RHS, Depth);
4603 }
4605 /// Return true if "icmp Pred LHS RHS" is always true.
4606 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
4607 const Value *RHS, const DataLayout &DL,
4608 unsigned Depth) {
4609 assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
4610 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
4611 return true;
4613 switch (Pred) {
4614 default:
4615 return false;
4617 case CmpInst::ICMP_SLE: {
4618 const APInt *C;
4620 // LHS s<= LHS +_{nsw} C if C >= 0
4621 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
4622 return !C->isNegative();
4623 return false;
4624 }
4626 case CmpInst::ICMP_ULE: {
4627 const APInt *C;
4629 // LHS u<= LHS +_{nuw} C for any C
4630 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
4631 return true;
4633 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
4634 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
4635 const Value *&X,
4636 const APInt *&CA, const APInt *&CB) {
4637 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
4638 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
4639 return true;
4641 // If X & C == 0 then (X | C) == X +_{nuw} C
4642 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
4643 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
4644 KnownBits Known(CA->getBitWidth());
4645 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
4646 /*CxtI*/ nullptr, /*DT*/ nullptr);
4647 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
4648 return true;
4649 }
4651 return false;
4652 };
4654 const Value *X;
4655 const APInt *CLHS, *CRHS;
4656 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
4657 return CLHS->ule(*CRHS);
4659 return false;
4660 }
4661 }
4662 }
4664 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
4665 /// ALHS ARHS" is true. Otherwise, return None.
4666 static Optional<bool>
4667 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
4668 const Value *ARHS, const Value *BLHS, const Value *BRHS,
4669 const DataLayout &DL, unsigned Depth) {
4670 switch (Pred) {
4671 default:
4672 return None;
4674 case CmpInst::ICMP_SLT:
4675 case CmpInst::ICMP_SLE:
4676 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
4677 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
4678 return true;
4679 return None;
4681 case CmpInst::ICMP_ULT:
4682 case CmpInst::ICMP_ULE:
4683 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
4684 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
4685 return true;
4686 return None;
4687 }
4688 }
4690 /// Return true if the operands of the two compares match. IsSwappedOps is true
4691 /// when the operands match, but are swapped.
4692 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
4693 const Value *BLHS, const Value *BRHS,
4694 bool &IsSwappedOps) {
4696 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
4697 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
4698 return IsMatchingOps || IsSwappedOps;
4699 }
4701 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
4702 /// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
4703 /// BRHS" is false. Otherwise, return None if we can't infer anything.
4704 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
4705 const Value *ALHS,
4706 const Value *ARHS,
4707 CmpInst::Predicate BPred,
4708 const Value *BLHS,
4709 const Value *BRHS,
4710 bool IsSwappedOps) {
4711 // Canonicalize the operands so they're matching.
4712 if (IsSwappedOps) {
4713 std::swap(BLHS, BRHS);
4714 BPred = ICmpInst::getSwappedPredicate(BPred);
4715 }
4716 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
4717 return true;
4718 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
4719 return false;
4721 return None;
4722 }
4724 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
4725 /// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
4726 /// C2" is false. Otherwise, return None if we can't infer anything.
4727 static Optional<bool>
4728 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
4729 const ConstantInt *C1,
4730 CmpInst::Predicate BPred,
4731 const Value *BLHS, const ConstantInt *C2) {
4732 assert(ALHS == BLHS && "LHS operands must match.");
4733 ConstantRange DomCR =
4734 ConstantRange::makeExactICmpRegion(APred, C1->getValue());
4735 ConstantRange CR =
4736 ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
4737 ConstantRange Intersection = DomCR.intersectWith(CR);
4738 ConstantRange Difference = DomCR.difference(CR);
4739 if (Intersection.isEmptySet())
4740 return false;
4741 if (Difference.isEmptySet())
4742 return true;
4744 return None;
4745 }
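// Editorial worked example: with (x u< 8) as the dominating condition,
// DomCR == [0, 8). Against (x u< 16), CR == [0, 16): the difference
// DomCR \ CR is empty, so 'true' is returned. Against (x u> 20) the
// intersection is empty instead, so 'false' is returned.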
4746 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
4747 /// false. Otherwise, return None if we can't infer anything.
4748 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
4749 const ICmpInst *RHS,
4750 const DataLayout &DL, bool LHSIsTrue,
4751 unsigned Depth) {
4752 Value *ALHS = LHS->getOperand(0);
4753 Value *ARHS = LHS->getOperand(1);
4754 // The rest of the logic assumes the LHS condition is true. If that's not the
4755 // case, invert the predicate to make it so.
4756 ICmpInst::Predicate APred =
4757 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
4759 Value *BLHS = RHS->getOperand(0);
4760 Value *BRHS = RHS->getOperand(1);
4761 ICmpInst::Predicate BPred = RHS->getPredicate();
4763 // Can we infer anything when the two compares have matching operands?
4764 bool IsSwappedOps;
4765 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
4766 if (Optional<bool> Implication = isImpliedCondMatchingOperands(
4767 APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
4768 return Implication;
4769 // No amount of additional analysis will infer the second condition, so
4770 // early exit.
4771 return None;
4772 }
4774 // Can we infer anything when the LHS operands match and the RHS operands are
4775 // constants (not necessarily matching)?
4776 if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
4777 if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
4778 APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
4779 cast<ConstantInt>(BRHS)))
4780 return Implication;
4781 // No amount of additional analysis will infer the second condition, so
4782 // early exit.
4783 return None;
4784 }
4786 if (APred == BPred)
4787 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
4789 return None;
4790 }
4791 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
4792 /// false. Otherwise, return None if we can't infer anything. We expect the
4793 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
4794 static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
4795 const ICmpInst *RHS,
4796 const DataLayout &DL, bool LHSIsTrue,
4797 unsigned Depth) {
4798 // The LHS must be an 'or' or an 'and' instruction.
4799 assert((LHS->getOpcode() == Instruction::And ||
4800 LHS->getOpcode() == Instruction::Or) &&
4801 "Expected LHS to be 'and' or 'or'.");
4803 assert(Depth <= MaxDepth && "Hit recursion limit");
4805 // If the result of an 'or' is false, then we know both legs of the 'or' are
4806 // false. Similarly, if the result of an 'and' is true, then we know both
4807 // legs of the 'and' are true.
4808 Value *ALHS, *ARHS;
4809 if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
4810 (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
4811 // FIXME: Make this non-recursion.
4812 if (Optional<bool> Implication =
4813 isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
4814 return Implication;
4815 if (Optional<bool> Implication =
4816 isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
4817 return Implication;
4818 }
4820 return None;
4821 }
4823 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
4824 const DataLayout &DL, bool LHSIsTrue,
4825 unsigned Depth) {
4826 // Bail out when we hit the limit.
4827 if (Depth == MaxDepth)
4828 return None;
4830 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
4831 // example.
4832 if (LHS->getType() != RHS->getType())
4833 return None;
4835 Type *OpTy = LHS->getType();
4836 assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
4838 // LHS ==> RHS by definition
4839 if (LHS == RHS)
4840 return LHSIsTrue;
4842 // FIXME: Extending the code below to handle vectors.
4843 if (OpTy->isVectorTy())
4844 return None;
4846 assert(OpTy->isIntegerTy(1) && "implied by above");
4848 // Both LHS and RHS are icmps.
4849 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
4850 const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
4851 if (LHSCmp && RHSCmp)
4852 return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);
4854 // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to be
4855 // an icmp. FIXME: Add support for and/or on the RHS.
4856 const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
4857 if (LHSBO && RHSCmp) {
4858 if ((LHSBO->getOpcode() == Instruction::And ||
4859 LHSBO->getOpcode() == Instruction::Or))
4860 return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);