//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations use.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;
// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

// This optimization is known to cause performance regressions in some cases;
// keep it under a temporary flag for now.
static cl::opt<bool>
DontImproveNonNegativePhiBits("dont-improve-non-negative-phi-bits",
                              cl::Hidden, cl::init(true));
/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}
namespace {
// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;
  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;
  unsigned NumExcluded;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), NumExcluded(0) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};
} // end anonymous namespace
// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}
static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE) {
  return ::computeKnownBits(V, Depth,
                            Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}
bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
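  // Example: if LHS is known to be even (bit 0 zero) and RHS is the constant
  // 1, every bit position is known zero on at least one side, so LHS + RHS
  // behaves exactly like LHS | RHS.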
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}
bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}
static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
}
static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}
bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNonNegative();
}
bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}
bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNegative();
}
static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
                                         DT));
}
static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             Query(DL, AC, safeCxtI(V, CxtI), DT));
}
static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  // Carry in a 1 for a subtract, rather than a 0.
  uint64_t CarryIn = 0;
  if (!Add) {
    // Sum = LHS + ~RHS + 1
    std::swap(Known2.Zero, Known2.One);
    CarryIn = 1;
  }

  APInt PossibleSumZero = ~LHSKnown.Zero + ~Known2.Zero + CarryIn;
  APInt PossibleSumOne = LHSKnown.One + Known2.One + CarryIn;
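  // Worked example for a 4-bit add: LHS = 0b01?0 (bit 1 unknown), RHS = 0b0010.
  // PossibleSumOne  = 0b0100 + 0b0010 = 0b0110 (unknown bits taken as 0).
  // PossibleSumZero = 0b0110 + 0b0010 = 0b1000 (unknown bits taken as 1).
  // A result bit is known only where both operand bits and the carry into
  // that position are known; here that is only bit 0, which is known zero.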
  // Compute known bits of the carry.
  APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnown.Zero ^ Known2.Zero);
  APInt CarryKnownOne = PossibleSumOne ^ LHSKnown.One ^ Known2.One;

  // Compute set of known bits (where all three relevant bits are known).
  APInt LHSKnownUnion = LHSKnown.Zero | LHSKnown.One;
  APInt RHSKnownUnion = Known2.Zero | Known2.One;
  APInt CarryKnownUnion = CarryKnownZero | CarryKnownOne;
  APInt Known = LHSKnownUnion & RHSKnownUnion & CarryKnownUnion;

  assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
         "known bits of sum differ");

  // Compute known bits of the result.
  KnownOut.Zero = ~PossibleSumOne & Known;
  KnownOut.One = PossibleSumOne & Known;

  // Are we still trying to solve for the sign bit?
  if (!Known.isSignBitSet()) {
    if (NSW) {
      // Adding two non-negative numbers, or subtracting a negative number from
      // a non-negative one, can't wrap into negative.
      if (LHSKnown.isNonNegative() && Known2.isNonNegative())
        KnownOut.makeNonNegative();
      // Adding two negative numbers, or subtracting a non-negative number from
      // a negative one, can't wrap into non-negative.
      else if (LHSKnown.isNegative() && Known2.isNegative())
        KnownOut.makeNegative();
    }
  }
}
static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }
  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
  unsigned TrailZ = Known.countMinTrailingZeros() +
                    Known2.countMinTrailingZeros();
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                            Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
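  // Example: if Op0 = a * 2^2 and Op1 = b * 2^3, the product is (a * b) * 2^5,
  // so it has at least five trailing zeros. For the leading zeros: with m and
  // n leading zeros in the operands, the full product is below
  // 2^(2*BitWidth - m - n), which leaves at least m + n - BitWidth leading
  // zeros in the truncated result whenever m + n > BitWidth.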
  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);

  Known.Zero.setLowBits(TrailZ);
  Known.Zero.setHighBits(LeadZ);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}
void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
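    // Example (i8): for the range [64, 72) the minimum is 0b01000000 and the
    // maximum is 0b01000111; min ^ max = 0b00000111 has five leading zeros,
    // so the top five bits of every value in the range are 01000.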
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}
static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;
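  // Example: in "%c = icmp eq i32 %x, 4; call @llvm.assume(i1 %c)", %c is
  // ephemeral to the assume: it (and anything feeding only it) exists solely
  // to support the assumption.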
  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (all_of(V->users(), [&](const User *U) { return EphValues.count(U); })) {
      if (V == E)
        return true;

      EphValues.insert(V);
      if (const User *U = dyn_cast<User>(V))
        for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
             J != JE; ++J)
          if (isSafeToSpeculativelyExecute(*J))
            WorkSet.push_back(*J);
    }
  }

  return false;
}
// Is this an intrinsic that cannot be speculated but also cannot trap?
static bool isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}
bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
         std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}
static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;
    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    ConstantInt *C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      Known.Zero |= RHSKnown.Zero;
      Known.One |= RHSKnown.One;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
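      // Example: assume((v & 0xF0) == 0x30) pins bits 7..4 of v to 0011 while
      // saying nothing about the low nibble.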
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C->getZExtValue());
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C->getZExtValue());
      Known.One |= RHSKnown.One;
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C->getZExtValue());
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C->getZExtValue());
      Known.One |= RHSKnown.Zero;
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them back to
      // the corresponding known bits of V by shifting to the left by C.
      Known.Zero |= RHSKnown.Zero << C->getZExtValue();
      Known.One |= RHSKnown.One << C->getZExtValue();
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // back to the corresponding known bits of V by shifting to the left by C.
      Known.Zero |= RHSKnown.One << C->getZExtValue();
      Known.One |= RHSKnown.Zero << C->getZExtValue();
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
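      // Example: from assume(v <_u 16) on an i8 v, the constant 16 =
      // 0b00010000 has three leading zeros and is a power of two, so v <= 15
      // and the top four bits of v are known zero.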
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
  }
  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE) {
      auto *CxtI = const_cast<Instruction *>(Q.CxtI);
      OptimizationRemarkAnalysis ORA("value-tracking", "BadAssumption", CxtI);
      Q.ORE->emit(ORA << "Detected conflicting code assumptions. Program may "
                         "have undefined behavior, or compiler may have "
                         "internal error.");
    }
  }
}
// Compute known bits from a shift operator, including those with a
// non-constant shift amount. Known is the output of this function. Known2 is a
// pre-allocated temporary with the same bit width as Known. KZF and KOF are
// operator-specific functors that, given the known-zero or known-one bits
// respectively, and a shift amount, compute the implied known-zero or known-one
// bits of the shift operator's result respectively for that shift amount. The
// results from calling KZF and KOF are conservatively combined for all
// permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2,
    unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();
  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If there is conflict between Known.Zero and Known.One, this must be an
    // overflowing left shift, so the shift result is undefined. Clear Known
    // bits so that other code could propagate this undef.
    if ((Known.Zero & Known.One) != 0)
      Known.resetAll();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of
  // the LHS, the value could be undef, so we don't know anything about it.
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
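  // Example: if only bit 1 of the shift amount might be set (so the amount is
  // either 0 or 2), the loop below intersects the known bits implied by
  // shifting by 0 and by 2 and skips every other shift amount.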
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One &= KOF(Known2.One, ShiftAmt);
  }

  // If there are no compatible shift amounts, then we've proven that the shift
  // amount must be >= the BitWidth, and the result is undefined. We could
  // return anything we'd like, but we need to make sure the sets of known bits
  // stay disjoint (it should be better for some other code to actually
  // propagate the undef than to pick a value here using known bits).
  if (Known.Zero.intersects(Known.One))
    Known.resetAll();
}
static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form and(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
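    // Example with y = 1: if x is even, bit 0 of x is already 0; if x is odd,
    // x + 1 is even. Either way bit 0 of (x & (x + 1)) is 0.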
    Value *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
                                       m_Value(Y))) ||
         match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
                                       m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  }
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (ushr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer.
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
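    // Example: for "%i = phi [0, %entry], [%i.next, %loop]" with
    // "%i.next = add %i, 4", both the start value and the step have at least
    // two trailing zero bits, so %i is always a multiple of 4.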
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;

          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          if (DontImproveNonNegativePhiBits)
            break;

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }
    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value refers to ourselves.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // other values.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::cttz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleTZ = Known2.One.countTrailingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleTZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = Known2.countMaxPopulation();
        unsigned LowBits = Log2_32(BitsPossiblySet)+1;
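        // Example: for an i32 with all bits possibly set, BitsPossiblySet is
        // 32, which needs Log2_32(32) + 1 = 6 bits to represent, so bits 6
        // and above of the result are known zero.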
        Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        break;
      }
    }
    break;
  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if vector is sign
    // extended, shifted, etc).
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              Known, Known2, Depth, Q);
          break;
        }
      }
    }
  }
}
/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, Known, Depth, Q);
  return Known;
}
/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                      const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Known.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
          V->getType()->isPtrOrPtrVectorTy()) &&
         "Not integer or pointer type!");
  assert(Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth &&
         "V and Known should have same BitWidth");

  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector constant!
    Known.One = *C;
    Known.Zero = ~Known.One;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
    Known.setAllZero();
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
    // We know that CDS must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    APInt Elt(BitWidth, 0);
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      Elt = CDS->getElementAsInteger(i);
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
    // We know that CV must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    APInt Elt(BitWidth, 0);
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Element = CV->getAggregateElement(i);
      auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
      if (!ElementCI) {
        Known.resetAll();
        return;
      }
      Elt = ElementCI->getValue();
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }
  // Start out not knowing anything.
  Known.resetAll();

  // We can't imply anything about undefs.
  if (isa<UndefValue>(V))
    return;

  // There's no point in looking through other users of ConstantData for
  // assumptions. Confirm that we've handled them all.
  assert(!isa<ConstantData>(V) && "Unhandled constant data!");

  // Limit search depth.
  // All recursive calls that increase depth must come after this.
  if (Depth == MaxDepth)
    return;

  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (!GA->isInterposable())
      computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
    return;
  }

  if (const Operator *I = dyn_cast<Operator>(V))
    computeKnownBitsFromOperator(I, Known, Depth, Q);
  // Aligned pointers have trailing zeros - refine Known.Zero set.
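  // (For example, a pointer aligned to 16 bytes has its low four bits known
  // zero.)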
  if (V->getType()->isPointerTy()) {
    unsigned Align = V->getPointerAlignment(Q.DL);
    if (Align)
      Known.Zero.setLowBits(countTrailingZeros(Align));
  }

  // computeKnownBitsFromAssume strictly refines Known.
  // Therefore, we run it after computeKnownBitsFromOperator.

  // Check whether a nearby assume intrinsic can determine some known bits.
  computeKnownBitsFromAssume(V, Known, Depth, Q);

  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
}
/// Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                            const Query &Q) {
  if (const Constant *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return OrZero;

    const APInt *ConstIntOrConstSplatInt;
    if (match(C, m_APInt(ConstIntOrConstSplatInt)))
      return ConstIntOrConstSplatInt->isPowerOf2();
  }
1614 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1615 // it is shifted off the end then the result is undefined.
1616 if (match(V, m_Shl(m_One(), m_Value())))
1619 // (signmask) >>l X is clearly a power of two if the one is not shifted off
1620 // the bottom. If it is shifted off the bottom then the result is undefined.
1621 if (match(V, m_LShr(m_SignMask(), m_Value())))
1624 // The remaining tests are all recursive, so bail out if we hit the limit.
1625 if (Depth++ == MaxDepth)
1628 Value *X = nullptr, *Y = nullptr;
1629 // A shift left or a logical shift right of a power of two is a power of two
1631 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1632 match(V, m_LShr(m_Value(X), m_Value()))))
1633 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1635 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1636 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1638 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1639 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1640 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1642 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1643 // A power of two and'd with anything is a power of two or zero.
1644 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1645 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1647 // X & (-X) is always a power of two or zero.
1648 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
  // Adding a power-of-two or zero to the same power-of-two or zero yields
  // either the original power-of-two, a larger power-of-two or zero.
  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
    if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
      if (match(X, m_And(m_Specific(Y), m_Value())) ||
          match(X, m_And(m_Value(), m_Specific(Y))))
        if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
          return true;
      if (match(Y, m_And(m_Specific(X), m_Value())) ||
          match(Y, m_And(m_Value(), m_Specific(X))))
        if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
          return true;

      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      KnownBits LHSBits(BitWidth);
      computeKnownBits(X, LHSBits, Depth, Q);

      KnownBits RHSBits(BitWidth);
      computeKnownBits(Y, RHSBits, Depth, Q);
      // If i8 V is a power of two or zero:
      //  ZeroBits: 1 1 1 0 1 1 1 1
      // ~ZeroBits: 0 0 0 1 0 0 0 0
      if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
        // If OrZero isn't set, we cannot give back a zero result.
        // Make sure either the LHS or RHS has a bit set.
        if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
          return true;
    }
  }

  // An exact divide or right shift can only shift off zero bits, so the result
  // is a power of two only if the first operand is a power of two and not
  // copying a sign bit (sdiv int_min, 2).
  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
      match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
    return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
                                  Depth, Q);
  }

  return false;
}
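// Illustrative example (not part of the original source; the IR values are
// hypothetical): %p = shl i32 1, %n is accepted by the 1 << X pattern above
// without recursing, while %m = and i32 %p, %q is a power of two *or zero*,
// so it is only accepted when OrZero is set.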
/// \brief Test whether a GEP's result is known to be non-null.
///
/// Uses properties inherent in a GEP to try to determine whether it is known
/// to be non-null.
///
/// Currently this routine does not support vector GEPs.
static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
                              const Query &Q) {
  if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
    return false;

  // FIXME: Support vector-GEPs.
  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");

  // If the base pointer is non-null, we cannot walk to a null address with an
  // inbounds GEP in address space zero.
  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
    return true;

  // Walk the GEP operands and see if any operand introduces a non-zero offset.
  // If so, then the GEP cannot produce a null pointer, as doing so would
  // inherently violate the inbounds contract within address space zero.
  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    // Struct types are easy -- they must always be indexed by a constant.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = Q.DL.getStructLayout(STy);
      uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
      if (ElementOffset > 0)
        return true;
      continue;
    }

    // If we have a zero-sized type, the index doesn't matter. Keep looping.
    if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
      continue;

    // Fast path the constant operand case both for efficiency and so we don't
    // increment Depth when just zipping down an all-constant GEP.
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
      if (!OpC->isZero())
        return true;
      continue;
    }

    // We post-increment Depth here because while isKnownNonZero increments it
    // as well, when we pop back up that increment won't persist. We don't want
    // to recurse 10k times just because we have 10k GEP operands. We don't
    // bail completely out because we want to handle constant GEPs regardless
    // of depth.
    if (Depth++ >= MaxDepth)
      continue;

    if (isKnownNonZero(GTI.getOperand(), Depth, Q))
      return true;
  }

  return false;
}
/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never Value? 'RangeType' is
/// the type of the value described by the range.
static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
  const unsigned NumRanges = Ranges->getNumOperands() / 2;
  assert(NumRanges >= 1);
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.contains(Value))
      return false;
  }
  return true;
}
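// Illustrative example (not part of the original source): for a load
// annotated with !range !{i8 1, i8 5},
// rangeMetadataExcludesValue(Ranges, APInt(8, 0)) returns true, since the
// half-open range [1,5) excludes 0; with !range !{i8 0, i8 5} it returns
// false because [0,5) contains 0.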
/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
  if (auto *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
      return true;

    // For constant vectors, check that all elements are undefined or known
    // non-zero to determine that the whole vector is known non-zero.
    if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
      for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
        Constant *Elt = C->getAggregateElement(i);
        if (!Elt || Elt->isNullValue())
          return false;
        if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
          return false;
      }
      return true;
    }

    return false;
  }

  if (auto *I = dyn_cast<Instruction>(V)) {
    if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
      // If the possible ranges don't contain zero, then the value is
      // definitely non-zero.
      if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
        const APInt ZeroValue(Ty->getBitWidth(), 0);
        if (rangeMetadataExcludesValue(Ranges, ZeroValue))
          return true;
      }
    }
  }
  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ >= MaxDepth)
    return false;

  // Check for pointer simplifications.
  if (V->getType()->isPointerTy()) {
    if (isKnownNonNullAt(V, Q.CxtI, Q.DT))
      return true;
    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      if (isGEPKnownNonNull(GEP, Depth, Q))
        return true;
  }

  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);

  // X | Y != 0 if X != 0 or Y != 0.
  Value *X = nullptr, *Y = nullptr;
  if (match(V, m_Or(m_Value(X), m_Value(Y))))
    return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);

  // ext X != 0 if X != 0.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
    return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);

  // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
  // if the lowest bit is shifted off the end.
  if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
    // shl nuw can't remove any non-zero bits.
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    if (BO->hasNoUnsignedWrap())
      return isKnownNonZero(X, Depth, Q);

    KnownBits Known(BitWidth);
    computeKnownBits(X, Known, Depth, Q);
    if (Known.One[0])
      return true;
  }
  // shr X, Y != 0 if X is negative. Note that the value of the shift is not
  // defined if the sign bit is shifted off the end.
  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
    // shr exact can only shift out zero bits.
    const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
    if (BO->isExact())
      return isKnownNonZero(X, Depth, Q);

    KnownBits Known = computeKnownBits(X, Depth, Q);
    if (Known.isNegative())
      return true;

    // If the shifter operand is a constant, and all of the bits shifted
    // out are known to be zero, and X is known non-zero then at least one
    // non-zero bit must remain.
    if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
      auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
      // Is there a known one in the portion not shifted out?
      if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
        return true;
      // Are all the bits to be shifted out known zero?
      if (Known.countMinTrailingZeros() >= ShiftVal)
        return isKnownNonZero(X, Depth, Q);
    }
  }
  // div exact can only produce a zero if the dividend is zero.
  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
    return isKnownNonZero(X, Depth, Q);
  }
  // X + Y.
  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    KnownBits XKnown = computeKnownBits(X, Depth, Q);
    KnownBits YKnown = computeKnownBits(Y, Depth, Q);

    // If X and Y are both non-negative (as signed values) then their sum is
    // not zero unless both X and Y are zero.
    if (XKnown.isNonNegative() && YKnown.isNonNegative())
      if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
        return true;

    // If X and Y are both negative (as signed values) then their sum is not
    // zero unless both X and Y equal INT_MIN.
    if (XKnown.isNegative() && YKnown.isNegative()) {
      APInt Mask = APInt::getSignedMaxValue(BitWidth);
      // The sign bit of X is set. If some other bit is set then X is not equal
      // to INT_MIN.
      if (XKnown.One.intersects(Mask))
        return true;
      // The sign bit of Y is set. If some other bit is set then Y is not equal
      // to INT_MIN.
      if (YKnown.One.intersects(Mask))
        return true;
    }

    // The sum of a non-negative number and a power of two is not zero.
    if (XKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
      return true;
    if (YKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
      return true;
  }
  // X * Y.
  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    // If X and Y are non-zero then so is X * Y as long as the multiplication
    // does not overflow.
    if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
        isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
      return true;
  }
  // (C ? X : Y) != 0 if X != 0 and Y != 0.
  else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
    if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
        isKnownNonZero(SI->getFalseValue(), Depth, Q))
      return true;
  }
  // PHI
  else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    // Try and detect a recurrence that monotonically increases from a
    // starting value, as these are common as induction variables.
    if (PN->getNumIncomingValues() == 2) {
      Value *Start = PN->getIncomingValue(0);
      Value *Induction = PN->getIncomingValue(1);
      if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
        std::swap(Start, Induction);
      if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
        if (!C->isZero() && !C->isNegative()) {
          ConstantInt *X;
          if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
               match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
              !X->isNegative())
            return true;
        }
      }
    }
    // Check if all incoming values are non-zero constant.
    bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) {
      return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
    });
    if (AllNonZeroConstants)
      return true;
  }

  KnownBits Known(BitWidth);
  computeKnownBits(V, Known, Depth, Q);
  return Known.One != 0;
}
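// Illustrative example (not part of the original source; the IR values are
// hypothetical): for %v = or i32 %x, 16, isKnownNonZero returns true through
// the X | Y rule above, because the constant operand 16 is itself non-zero
// and an 'or' cannot clear bits.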
/// Return true if V2 == V1 + X, where X is known non-zero.
static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
  const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
  if (!BO || BO->getOpcode() != Instruction::Add)
    return false;
  Value *Op = nullptr;
  if (V2 == BO->getOperand(0))
    Op = BO->getOperand(1);
  else if (V2 == BO->getOperand(1))
    Op = BO->getOperand(0);
  else
    return false;

  return isKnownNonZero(Op, 0, Q);
}
/// Return true if it is known that V1 != V2.
static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
  if (V1 == V2)
    return false;
  if (V1->getType() != V2->getType())
    // We can't look through casts yet.
    return false;
  if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
    return true;

  if (V1->getType()->isIntOrIntVectorTy()) {
    // Are any known bits in V1 contradictory to known bits in V2? If V1
    // has a known zero where V2 has a known one, they must not be equal.
    KnownBits Known1 = computeKnownBits(V1, 0, Q);
    KnownBits Known2 = computeKnownBits(V2, 0, Q);

    if (Known1.Zero.intersects(Known2.One) ||
        Known2.Zero.intersects(Known1.One))
      return true;
  }
  return false;
}
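// Illustrative example (not part of the original source; the IR values are
// hypothetical): isKnownNonEqual proves %v != (add i32 %v, 8) via
// isAddOfNonZero, and the known-bits test catches cases such as
// (or i32 %x, 1) vs. (shl i32 %y, 1), whose low bits are known one and known
// zero respectively.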
/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the mask,
/// known zero, and known one values are the same width as the vector element,
/// and the bit is set only if it is true for all of the elements in the
/// vector.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                       const Query &Q) {
  KnownBits Known(Mask.getBitWidth());
  computeKnownBits(V, Known, Depth, Q);
  return Mask.isSubsetOf(Known.Zero);
}
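// Illustrative example (not part of the original source; the IR values are
// hypothetical): with %v = and i32 %x, 255, MaskedValueIsZero(%v, 0xFFFFFF00)
// returns true, because every bit above the low byte of %v is known zero.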
/// For vector constants, loop over the elements and find the constant with the
/// minimum number of sign bits. Return 0 if the value is not a vector constant
/// or if any element was not analyzed; otherwise, return the count for the
/// element with the minimum number of sign bits.
static unsigned computeNumSignBitsVectorConstant(const Value *V,
                                                 unsigned TyBits) {
  const auto *CV = dyn_cast<Constant>(V);
  if (!CV || !CV->getType()->isVectorTy())
    return 0;

  unsigned MinSignBits = TyBits;
  unsigned NumElts = CV->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    // If we find a non-ConstantInt, bail out.
    auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
    if (!Elt)
      return 0;

    // If the sign bit is 1, flip the bits, so we always count leading zeros.
    APInt EltVal = Elt->getValue();
    if (EltVal.isNegative())
      EltVal = ~EltVal;
    MinSignBits = std::min(MinSignBits, EltVal.countLeadingZeros());
  }

  return MinSignBits;
}
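// Illustrative example (not part of the original source): for the constant
// <2 x i8> <i8 -1, i8 3>, the element -1 has 8 sign bits and 3 has 6
// (its top six bits are copies of the zero sign bit), so
// computeNumSignBitsVectorConstant returns 6.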
static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
  assert(Result > 0 && "At least one sign bit needs to be present!");
  return Result;
}
/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign bit
/// (itself), but other cases can give us information. For example, immediately
/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
/// other, so we return 3. For vectors, return the number of sign bits for the
/// vector element with the minimum number of known sign bits.
static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q) {

  // We return the minimum number of sign bits that are guaranteed to be present
  // in V, so for undef we have to conservatively return 1. We don't have the
  // same behavior for poison though -- that's a FIXME today.

  unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general computeKnownBits case
  // below.

  if (Depth == MaxDepth)
    return 1;  // Limit search depth.
  const Operator *U = dyn_cast<Operator>(V);
  switch (Operator::getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;

  case Instruction::SDiv: {
    const APInt *Denominator;
    // sdiv X, C -> adds log(C) sign bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits.
      unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Add floor(log(C)) bits to the numerator bits.
      return std::min(TyBits, NumBits + Denominator->logBase2());
    }
    break;
  }
  case Instruction::SRem: {
    const APInt *Denominator;
    // srem X, C -> we know that the result is within [-C+1,C) when C is a
    // positive constant. This lets us put a lower bound on the number of sign
    // bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits. SRem by a positive constant
      // can't lower the number of sign bits.
      unsigned NumrBits =
          ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Calculate the leading sign bit constraints by examining the
      // denominator. Given that the denominator is positive, there are two
      // cases:
      //
      //  1. The numerator is positive. The result range is [0,C) and
      //     [0,C) u< (1 << ceilLogBase2(C)).
      //
      //  2. The numerator is negative. Then the result range is (-C,0] and
      //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
      //
      // Thus a lower bound on the number of sign bits is `TyBits -
      // ceilLogBase2(C)`.
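      // Worked example (illustrative, not part of the original source): for
      // an i8 srem by C = 20, ceilLogBase2(20) = 5, so the result is always
      // known to have at least 8 - 5 = 3 sign bits.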
      unsigned ResBits = TyBits - Denominator->ceilLogBase2();
      return std::max(NumrBits, ResBits);
    }
    break;
  }
  case Instruction::AShr: {
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    // ashr X, C -> adds C sign bits. Vectors too.
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      unsigned ShAmtLimited = ShAmt->getZExtValue();
      if (ShAmtLimited >= TyBits)
        break;  // Bad shift.
      Tmp += ShAmtLimited;
      if (Tmp > TyBits) Tmp = TyBits;
    }
    return Tmp;
  }
  case Instruction::Shl: {
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
      Tmp2 = ShAmt->getZExtValue();
      if (Tmp2 >= TyBits ||    // Bad shift.
          Tmp2 >= Tmp) break;  // Shifted all sign bits out.
      return Tmp - Tmp2;
    }
    break;
  }
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // computeKnownBits, and pick whichever answer is better.
    }
    break;

  case Instruction::Select:
    Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
    return std::min(Tmp, Tmp2);
  case Instruction::Add:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        KnownBits Known(TyBits);
        computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return TyBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (Known.isNonNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;
  case Instruction::Sub:
    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
      if (CLHS->isNullValue()) {
        KnownBits Known(TyBits);
        computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return TyBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (Known.isNonNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;
  case Instruction::PHI: {
    const PHINode *PN = cast<PHINode>(U);
    unsigned NumIncomingValues = PN->getNumIncomingValues();
    // Don't analyze large in-degree PHIs.
    if (NumIncomingValues > 4) break;
    // Unreachable blocks may have zero-operand PHI nodes.
    if (NumIncomingValues == 0) break;

    // Take the minimum of all incoming values. This can't infinitely loop
    // because of our depth threshold.
    Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
    for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
      if (Tmp == 1) return Tmp;
      Tmp = std::min(
          Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
    }
    return Tmp;
  }

  case Instruction::Trunc:
    // FIXME: it's tricky to do anything useful for this, but it is an important
    // case for targets like X86.
    break;

  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if vector is sign
    // extended, shifted, etc).
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
  }
  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.

  // If we can examine all elements of a vector constant successfully, we're
  // done (we can't do any better than that). If not, keep trying.
  if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
    return VecSignBits;

  KnownBits Known(TyBits);
  computeKnownBits(V, Known, Depth, Q);

  // If we know that the sign bit is either zero or one, determine the number of
  // identical bits in the top of the input value.
  return std::max(FirstAnswer, Known.countMinSignBits());
}
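// Illustrative example (not part of the original source; the IR values are
// hypothetical): immediately after %s = ashr i32 %x, 4, ComputeNumSignBits(%s)
// returns at least 5 -- the original sign bit plus the four copies the
// arithmetic shift creates.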
/// This function computes the integer multiple of Base that equals V.
/// If successful, it returns true and returns the multiple in
/// Multiple. If unsuccessful, it returns false. It looks
/// through SExt instructions only if LookThroughSExt is true.
bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
                           bool LookThroughSExt, unsigned Depth) {
  const unsigned MaxDepth = 6;

  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");

  Type *T = V->getType();

  ConstantInt *CI = dyn_cast<ConstantInt>(V);

  if (Base == 0)
    return false;

  if (Base == 1) {
    Multiple = V;
    return true;
  }

  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
  Constant *BaseVal = ConstantInt::get(T, Base);
  if (CO && CO == BaseVal) {
    // Multiple is 1.
    Multiple = ConstantInt::get(T, 1);
    return true;
  }

  if (CI && CI->getZExtValue() % Base == 0) {
    Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
    return true;
  }

  if (Depth == MaxDepth) return false;  // Limit search depth.
  Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  switch (I->getOpcode()) {
  default: break;
  case Instruction::SExt:
    if (!LookThroughSExt) return false;
    // otherwise fall through to ZExt
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
    return ComputeMultiple(I->getOperand(0), Base, Multiple,
                           LookThroughSExt, Depth+1);
  case Instruction::Shl:
  case Instruction::Mul: {
    Value *Op0 = I->getOperand(0);
    Value *Op1 = I->getOperand(1);

    if (I->getOpcode() == Instruction::Shl) {
      ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
      if (!Op1CI) return false;
      // Turn Op0 << Op1 into Op0 * 2^Op1
      APInt Op1Int = Op1CI->getValue();
      uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
      APInt API(Op1Int.getBitWidth(), 0);
      API.setBit(BitToSet);
      Op1 = ConstantInt::get(V->getContext(), API);
    }
    Value *Mul0 = nullptr;
    if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
      if (Constant *Op1C = dyn_cast<Constant>(Op1))
        if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
          if (Op1C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
          if (Op1C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op1C->getType());

          // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
          Multiple = ConstantExpr::getMul(MulC, Op1C);
          return true;
        }

      if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
        if (Mul0CI->getValue() == 1) {
          // V == Base * Op1, so return Op1
          Multiple = Op1;
          return true;
        }
    }

    Value *Mul1 = nullptr;
    if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
      if (Constant *Op0C = dyn_cast<Constant>(Op0))
        if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
          if (Op0C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
          if (Op0C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op0C->getType());

          // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
          Multiple = ConstantExpr::getMul(MulC, Op0C);
          return true;
        }

      if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
        if (Mul1CI->getValue() == 1) {
          // V == Base * Op0, so return Op0
          Multiple = Op0;
          return true;
        }
    }
  }
  }

  // We could not determine if V is a multiple of Base.
  return false;
}
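// Illustrative example (not part of the original source; the IR values are
// hypothetical): for %v = mul i32 %x, 8 and Base == 8, the recursive call on
// the constant operand yields a multiple of 1, so ComputeMultiple succeeds
// with Multiple == %x.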
Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
                                            const TargetLibraryInfo *TLI) {
  const Function *F = ICS.getCalledFunction();
  if (!F)
    return Intrinsic::not_intrinsic;

  if (F->isIntrinsic())
    return F->getIntrinsicID();

  if (!TLI)
    return Intrinsic::not_intrinsic;

  LibFunc Func;
  // We're going to make assumptions on the semantics of the functions, check
  // that the target knows that it's available in this environment and it does
  // not have local linkage.
  if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
    return Intrinsic::not_intrinsic;

  if (!ICS.onlyReadsMemory())
    return Intrinsic::not_intrinsic;
  // Otherwise check if we have a call to a function that can be turned into a
  // vector intrinsic.
  switch (Func) {
  default:
    break;
  case LibFunc_sin: case LibFunc_sinf: case LibFunc_sinl:
    return Intrinsic::sin;
  case LibFunc_cos: case LibFunc_cosf: case LibFunc_cosl:
    return Intrinsic::cos;
  case LibFunc_exp: case LibFunc_expf: case LibFunc_expl:
    return Intrinsic::exp;
  case LibFunc_exp2: case LibFunc_exp2f: case LibFunc_exp2l:
    return Intrinsic::exp2;
  case LibFunc_log: case LibFunc_logf: case LibFunc_logl:
    return Intrinsic::log;
  case LibFunc_log10: case LibFunc_log10f: case LibFunc_log10l:
    return Intrinsic::log10;
  case LibFunc_log2: case LibFunc_log2f: case LibFunc_log2l:
    return Intrinsic::log2;
  case LibFunc_fabs: case LibFunc_fabsf: case LibFunc_fabsl:
    return Intrinsic::fabs;
  case LibFunc_fmin: case LibFunc_fminf: case LibFunc_fminl:
    return Intrinsic::minnum;
  case LibFunc_fmax: case LibFunc_fmaxf: case LibFunc_fmaxl:
    return Intrinsic::maxnum;
  case LibFunc_copysign: case LibFunc_copysignf: case LibFunc_copysignl:
    return Intrinsic::copysign;
  case LibFunc_floor: case LibFunc_floorf: case LibFunc_floorl:
    return Intrinsic::floor;
  case LibFunc_ceil: case LibFunc_ceilf: case LibFunc_ceill:
    return Intrinsic::ceil;
  case LibFunc_trunc: case LibFunc_truncf: case LibFunc_truncl:
    return Intrinsic::trunc;
  case LibFunc_rint: case LibFunc_rintf: case LibFunc_rintl:
    return Intrinsic::rint;
  case LibFunc_nearbyint: case LibFunc_nearbyintf: case LibFunc_nearbyintl:
    return Intrinsic::nearbyint;
  case LibFunc_round: case LibFunc_roundf: case LibFunc_roundl:
    return Intrinsic::round;
  case LibFunc_pow: case LibFunc_powf: case LibFunc_powl:
    return Intrinsic::pow;
  case LibFunc_sqrt: case LibFunc_sqrtf: case LibFunc_sqrtl:
    if (ICS->hasNoNaNs())
      return Intrinsic::sqrt;
    return Intrinsic::not_intrinsic;
  }

  return Intrinsic::not_intrinsic;
}
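// Illustrative example (not part of the original source): a readonly call to
// sinf that TLI recognizes maps to Intrinsic::sin; sqrtf is only mapped when
// the call carries the no-NaNs flag, since a negative input would otherwise
// produce a NaN that the intrinsic form need not preserve.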
/// Return true if we can prove that the specified FP value is never equal to
/// -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
                                unsigned Depth) {
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  if (Depth == MaxDepth)
    return false;  // Limit search depth.

  const Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  // Check if the nsz fast-math flag is set.
  if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
    if (FPO->hasNoSignedZeros())
      return true;

  // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
  if (I->getOpcode() == Instruction::FAdd)
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
      if (CFP->isNullValue())
        return true;

  // sitofp and uitofp turn into +0.0 for zero.
  if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I)) {
    Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
    switch (IID) {
    default:
      break;
    // sqrt(-0.0) = -0.0, no other negative results are possible.
    case Intrinsic::sqrt:
      return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1);
    // fabs(x) != -0.0
    case Intrinsic::fabs:
      return true;
    }
  }

  return false;
}
/// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
/// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
/// bit despite comparing equal.
static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
                                            const TargetLibraryInfo *TLI,
                                            bool SignBitOnly,
                                            unsigned Depth) {
  // TODO: This function does not do the right thing when SignBitOnly is true
  // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
  // which flips the sign bits of NaNs. See
  // https://llvm.org/bugs/show_bug.cgi?id=31702.

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    return !CFP->getValueAPF().isNegative() ||
           (!SignBitOnly && CFP->getValueAPF().isZero());
  }

  if (Depth == MaxDepth)
    return false;  // Limit search depth.

  const Operator *I = dyn_cast<Operator>(V);
  if (!I)
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  // Unsigned integers are always nonnegative.
  case Instruction::UIToFP:
    return true;

  case Instruction::FMul:
    // x*x is always non-negative or a NaN.
    if (I->getOperand(0) == I->getOperand(1) &&
        (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
      return true;

    LLVM_FALLTHROUGH;
  case Instruction::FAdd:
  case Instruction::FDiv:
  case Instruction::FRem:
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1) &&
           cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::Select:
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                           Depth + 1) &&
           cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::FPExt:
  case Instruction::FPTrunc:
    // Widening/narrowing never change sign.
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::Call:
    const auto *CI = cast<CallInst>(I);
    Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
    switch (IID) {
    default:
      break;
    case Intrinsic::maxnum:
      return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                             Depth + 1) ||
             cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                             Depth + 1);
    case Intrinsic::minnum:
      return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                             Depth + 1) &&
             cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                             Depth + 1);
    case Intrinsic::exp:
    case Intrinsic::exp2:
    case Intrinsic::fabs:
      return true;

    case Intrinsic::sqrt:
      // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
      if (!SignBitOnly)
        return true;
      return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
                                 CannotBeNegativeZero(CI->getOperand(0), TLI));

    case Intrinsic::powi:
      if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // powi(x,n) is non-negative if n is even.
        if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
          return true;
      }
      // TODO: This is not correct. Given that exp is an integer, here are the
      // ways that pow can return a negative value:
      //
      //   pow(x, exp)    --> negative if exp is odd and x is negative.
      //   pow(-0, exp)   --> -inf if exp is negative odd.
      //   pow(-0, exp)   --> -0 if exp is positive odd.
      //   pow(-inf, exp) --> -0 if exp is negative odd.
      //   pow(-inf, exp) --> -inf if exp is positive odd.
      //
      // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
      // but we must return false if x == -0. Unfortunately we do not currently
      // have a way of expressing this constraint. See details in
      // https://llvm.org/bugs/show_bug.cgi?id=31702.
      return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                             Depth + 1);

    case Intrinsic::fma:
    case Intrinsic::fmuladd:
      // x*x+y is non-negative if y is non-negative.
      return I->getOperand(0) == I->getOperand(1) &&
             (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
             cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
                                             Depth + 1);
    }
    break;
  }
  return false;
}
bool llvm::CannotBeOrderedLessThanZero(const Value *V,
                                       const TargetLibraryInfo *TLI) {
  return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
}

bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
  return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
}
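// Illustrative example (not part of the original source; the IR values are
// hypothetical): SignBitMustBeZero is true for
// %f = call float @llvm.fabs.f32(float %x), and CannotBeOrderedLessThanZero
// is also true for %s = fmul float %x, %x, which is either non-negative or a
// NaN (the SignBitOnly form additionally requires the no-NaNs flag there).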
/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
Value *llvm::isBytewiseValue(Value *V) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Handle 'null' ConstantArrayZero etc.
  if (Constant *C = dyn_cast<Constant>(V))
    if (C->isNullValue())
      return Constant::getNullValue(Type::getInt8Ty(V->getContext()));

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers whose width is a multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getBitWidth() % 8 == 0) {
      assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");

      if (!CI->getValue().isSplat(8))
        return nullptr;

      return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
    }
  }

  // A ConstantDataArray/Vector is splatable if all its members are equal and
  // also splatable.
  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
    Value *Elt = CA->getElementAsConstant(0);
    Value *Val = isBytewiseValue(Elt);
    if (!Val)
      return nullptr;

    for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
      if (CA->getElementAsConstant(I) != Elt)
        return nullptr;

    return Val;
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return nullptr;
}
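// Illustrative example (not part of the original source): isBytewiseValue
// returns i8 1 for i32 0x01010101, i8 0 for double 0.0 (via the null-value
// path), and null for i16 0x1234, whose two bytes differ.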
// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the index within the nested struct From that we are
// looking at now (which is of type IndexedType). IdxSkip is the number of
// indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far, new insertvalue instructions
// will be inserted into it.
static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
                                SmallVectorImpl<unsigned> &Idxs,
                                unsigned IdxSkip,
                                Instruction *InsertBefore) {
  llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
  if (STy) {
    // Save the original To argument so we can modify it.
    Value *OrigTo = To;
    // General case, the type indexed by Idxs is a struct.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      // Process each struct element recursively.
      Idxs.push_back(i);
      Value *PrevTo = To;
      To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
                             InsertBefore);
      Idxs.pop_back();
      if (!To) {
        // Couldn't find any inserted value for this index? Cleanup.
        while (PrevTo != OrigTo) {
          InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
          PrevTo = Del->getAggregateOperand();
          Del->eraseFromParent();
        }
        // Stop processing elements.
        break;
      }
    }
    // If we successfully found a value for each of our subaggregates...
    if (To)
      return To;
  }

  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
  // the struct's elements had a value that was inserted directly. In the latter
  // case, perhaps we can't determine each of the subelements individually, but
  // we might be able to find the complete struct somewhere.

  // Find the value that is at that particular spot.
  Value *V = FindInsertedValue(From, Idxs);

  if (!V)
    return nullptr;

  // Insert the value in the new (sub) aggregate.
  return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
                                       "tmp", InsertBefore);
}
// This helper takes a nested struct and extracts a part of it (which is again a
// struct) into a new value. For example, given the struct:
//   { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
//   { c, d }.
//
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work if
// each of the elements of the substruct are known (ie, inserted into From by an
// insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore.
static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
                                Instruction *InsertBefore) {
  assert(InsertBefore && "Must have someplace to insert!");
  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                       idx_range);
  Value *To = UndefValue::get(IndexedType);
  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
  unsigned IdxSkip = Idxs.size();

  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
}
/// Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if it
/// were inserted directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                               Instruction *InsertBefore) {
  // Nothing to index? Just return V then (this is useful at the end of our
  // recursion).
  if (idx_range.empty())
    return V;
  // We have indices, so V should have an indexable type.
  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
         "Not looking at a struct or array?");
  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
         "Invalid indices for type?");

  if (Constant *C = dyn_cast<Constant>(V)) {
    C = C->getAggregateElement(idx_range[0]);
    if (!C) return nullptr;
    return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
  }
  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Loop the indices for the insertvalue instruction in parallel with the
    // requested indices.
    const unsigned *req_idx = idx_range.begin();
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i, ++req_idx) {
      if (req_idx == idx_range.end()) {
        // We can't handle this without inserting insertvalues.
        if (!InsertBefore)
          return nullptr;

        // The requested index identifies a part of a nested aggregate. Handle
        // this specially. For example,
        //   %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
        //   %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
        //   %C = extractvalue {i32, { i32, i32 } } %B, 1
        // This can be changed into
        //   %A = insertvalue {i32, i32 } undef, i32 10, 0
        //   %C = insertvalue {i32, i32 } %A, i32 11, 1
        // which allows the unused 0,0 element from the nested struct to be
        // removed.
        return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
                                 InsertBefore);
      }

      // This insert value inserts something else than what we are looking for.
      // See if the (aggregate) value inserted into has the value we are
      // looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_range,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(),
                             makeArrayRef(req_idx, idx_range.end()),
                             InsertBefore);
  }
  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
    // something else, we can extract from that something else directly instead.
    // However, we will need to chain I's indices with the requested indices.

    // Calculate the number of indices required.
    unsigned size = I->getNumIndices() + idx_range.size();
    // Allocate some space to put the new indices in.
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add indices from the extract value instruction.
    Idxs.append(I->idx_begin(), I->idx_end());

    // Add requested indices.
    Idxs.append(idx_range.begin(), idx_range.end());

    assert(Idxs.size() == size
           && "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
  }
  // Otherwise, we don't know (such as, extracting from a function return value
  // or load instruction).
  return nullptr;
}
/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                              const DataLayout &DL) {
  unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
  APInt ByteOffset(BitWidth, 0);

  // We walk up the defs but use a visited set to handle unreachable code. In
  // that case, we stop after accumulating the cycle once (not that it
  // matters).
  SmallPtrSet<Value *, 16> Visited;
  while (Visited.insert(Ptr).second) {
    if (Ptr->getType()->isVectorTy())
      break;

    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
      // If one of the values we have visited is an addrspacecast, then
      // the pointer type of this GEP may be different from the type
      // of the Ptr parameter which was passed to this function. This
      // means when we construct GEPOffset, we need to use the size
      // of GEP's pointer type rather than the size of the original
      // pointer type.
      APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset))
        break;

      ByteOffset += GEPOffset.getSExtValue();

      Ptr = GEP->getPointerOperand();
    } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
               Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
      Ptr = cast<Operator>(Ptr)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
      if (GA->isInterposable())
        break;
      Ptr = GA->getAliasee();
    } else {
      break;
    }
  }
  Offset = ByteOffset.getSExtValue();
  return Ptr;
}
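// Illustrative example (not part of the original source; the IR values are
// hypothetical): for %p = getelementptr inbounds i32, i32* %base, i64 3,
// this sets Offset to 12 (three i32 elements) and returns %base.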
bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
                                       unsigned CharSize) {
  // Make sure the GEP has exactly three arguments.
  if (GEP->getNumOperands() != 3)
    return false;

  // Make sure the index-ee is a pointer to array of \p CharSize integers.
  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
  if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
    return false;

  // Check to make sure that the first operand of the GEP is an integer and
  // has value 0 so that we are sure we're indexing into the initializer.
  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!FirstIdx || !FirstIdx->isZero())
    return false;

  return true;
}
bool llvm::getConstantDataArrayInfo(const Value *V,
                                    ConstantDataArraySlice &Slice,
                                    unsigned ElementSize, uint64_t Offset) {
  assert(V);

  // Look through bitcast instructions and geps.
  V = V->stripPointerCasts();

  // If the value is a GEP instruction or constant expression, treat it as an
  // offset.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // The GEP operator should be based on a pointer to string constant, and is
    // indexing into the string constant.
    if (!isGEPBasedOnPointerToString(GEP, ElementSize))
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array. If this occurs, we can't say anything meaningful about
    // the string.
    uint64_t StartIdx = 0;
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;
    return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
                                    StartIdx + Offset);
  }

  // The GEP instruction, constant or instruction, must reference a global
  // variable that is a constant and is initialized. The referenced constant
  // initializer is the array that we'll use for optimization.
  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return false;

  const ConstantDataArray *Array;
  ArrayType *ArrayTy;
  if (GV->getInitializer()->isNullValue()) {
    Type *GVTy = GV->getValueType();
    if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
      // A zeroinitializer for the array; there is no ConstantDataArray.
      Array = nullptr;
    } else {
      const DataLayout &DL = GV->getParent()->getDataLayout();
      uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
      uint64_t Length = SizeInBytes / (ElementSize / 8);
      if (Length <= Offset)
        return false;

      Slice.Array = nullptr;
      Slice.Offset = 0;
      Slice.Length = Length - Offset;
      return true;
    }
  } else {
    // This must be a ConstantDataArray.
    Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
    if (!Array)
      return false;
    ArrayTy = Array->getType();
  }
  if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
    return false;

  uint64_t NumElts = ArrayTy->getArrayNumElements();
  if (Offset > NumElts)
    return false;

  Slice.Array = Array;
  Slice.Offset = Offset;
  Slice.Length = NumElts - Offset;
  return true;
}
/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str.
/// If unsuccessful, it returns false.
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
                                 uint64_t Offset, bool TrimAtNul) {
  ConstantDataArraySlice Slice;
  if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
    return false;

  if (Slice.Array == nullptr) {
    if (TrimAtNul) {
      Str = StringRef();
      return true;
    }
    if (Slice.Length == 1) {
      Str = StringRef("", 1);
      return true;
    }
    // We cannot instantiate a StringRef as we do not have an appropriate string
    // of 0s at hand.
    return false;
  }

  // Start out with the entire array in the StringRef.
  Str = Slice.Array->getAsString();
  // Skip over 'offset' bytes.
  Str = Str.substr(Slice.Offset);

  if (TrimAtNul) {
    // Trim off the \0 and anything after it. If the array is not nul
    // terminated, we just return the whole end of string. The client may know
    // some other way that the string is length-bound.
    Str = Str.substr(0, Str.find('\0'));
  }
  return true;
}
// These next two are very similar to the above, but also look through PHI
// nodes.
// TODO: See if we can integrate these two together.

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
static uint64_t GetStringLengthH(const Value *V,
                                 SmallPtrSetImpl<const PHINode*> &PHIs,
                                 unsigned CharSize) {
  // Look through noop bitcast instructions.
  V = V->stripPointerCasts();

  // If this is a PHI node, there are two cases: either we have already seen it
  // or we haven't.
  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    if (!PHIs.insert(PN).second)
      return ~0ULL;  // already in the set.

    // If it was new, see if all the input strings are the same length.
    uint64_t LenSoFar = ~0ULL;
    for (Value *IncValue : PN->incoming_values()) {
      uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
      if (Len == 0) return 0; // Unknown length -> unknown.

      if (Len == ~0ULL) continue;

      if (Len != LenSoFar && LenSoFar != ~0ULL)
        return 0;    // Disagree -> unknown.
      LenSoFar = Len;
    }

    // Success, all agree.
    return LenSoFar;
  }

  // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
    uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
    if (Len1 == 0) return 0;
    uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
    if (Len2 == 0) return 0;
    if (Len1 == ~0ULL) return Len2;
    if (Len2 == ~0ULL) return Len1;
    if (Len1 != Len2) return 0;
    return Len1;
  }

  // Otherwise, see if we can read the string.
  ConstantDataArraySlice Slice;
  if (!getConstantDataArrayInfo(V, Slice, CharSize))
    return 0;

  if (Slice.Array == nullptr)
    return 1;

  // Search for nul characters.
  unsigned NullIndex = 0;
  for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
    if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
      break;
  }

  return NullIndex + 1;
}
/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
  if (!V->getType()->isPointerTy()) return 0;

  SmallPtrSet<const PHINode*, 32> PHIs;
  uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
  // an empty string as a length.
  return Len == ~0ULL ? 1 : Len;
}
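// Illustrative example (not part of the original source): for a pointer to
// @.str = constant [6 x i8] c"hello\00", GetStringLength returns 6
// (strlen("hello") + 1); it returns 0 whenever the length cannot be
// determined.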
/// \brief \p PN defines a loop-variant pointer to an object. Check if the
/// previous iteration of the loop was referring to the same object as \p PN.
static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
                                         const LoopInfo *LI) {
  // Find the loop-defined value.
  Loop *L = LI->getLoopFor(PN->getParent());
  if (PN->getNumIncomingValues() != 2)
    return true;

  // Find the value from previous iteration.
  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    return true;

  // If a new pointer is loaded in the loop, the pointer references a different
  // object in every iteration. E.g.:
  //   for (i)
  //     int *p = a[i];
  //     ...
  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
    if (!L->isLoopInvariant(Load->getPointerOperand()))
      return false;
  return true;
}
Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
                                 unsigned MaxLookup) {
  if (!V->getType()->isPointerTy())
    return V;
  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        return V;
      V = GA->getAliasee();
    } else if (isa<AllocaInst>(V)) {
      // An alloca can't be further simplified.
      return V;
    } else {
      if (auto CS = CallSite(V))
        if (Value *RV = CS.getReturnedArgOperand()) {
          V = RV;
          continue;
        }

      // See if InstructionSimplify knows any relevant tricks.
      if (Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Acquire a DominatorTree and AssumptionCache and use them.
        if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
          V = Simplified;
          continue;
        }

      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  }
  return V;
}
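// Illustrative example (not part of the original source): starting from a
// bitcast of a GEP into an alloca, GetUnderlyingObject walks back to the
// alloca within MaxLookup steps; GetUnderlyingObjects below additionally
// fans out through selects and PHIs, collecting every possible base object.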
void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
                                const DataLayout &DL, LoopInfo *LI,
                                unsigned MaxLookup) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObject(P, DL, MaxLookup);

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      // If this PHI changes the underlying object in every iteration of the
      // loop, don't look through it. Consider:
      //   int **A;
      //   for (i) {
      //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
      //     Curr = A[i];
      //     *Prev, *Curr;
      //   }
      //
      // Prev is tracking Curr one iteration behind so they refer to different
      // underlying objects.
      if (!LI || !LI->isLoopHeader(PN->getParent()) ||
          isSameUnderlyingObjectInLoop(PN, LI))
        for (Value *IncValue : PN->incoming_values())
          Worklist.push_back(IncValue);
      continue;
    }

    Objects.push_back(P);
  } while (!Worklist.empty());
}
/// Return true if the only users of this pointer are lifetime markers.
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
  for (const User *U : V->users()) {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II) return false;

    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
        II->getIntrinsicID() != Intrinsic::lifetime_end)
      return false;
  }
  return true;
}
bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                        const Instruction *CtxI,
                                        const DominatorTree *DT) {
  const Operator *Inst = dyn_cast<Operator>(V);
  if (!Inst)
    return false;

  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
      if (C->canTrap())
        return false;

  switch (Inst->getOpcode()) {
  default:
    return true;
  case Instruction::UDiv:
  case Instruction::URem: {
    // x / y is undefined if y == 0.
    const APInt *V;
    if (match(Inst->getOperand(1), m_APInt(V)))
      return *V != 0;
    return false;
  }
  case Instruction::SDiv:
  case Instruction::SRem: {
    // x / y is undefined if y == 0 or x == INT_MIN and y == -1.
    const APInt *Numerator, *Denominator;
    if (!match(Inst->getOperand(1), m_APInt(Denominator)))
      return false;
    // We cannot hoist this division if the denominator is 0.
    if (*Denominator == 0)
      return false;
    // It's safe to hoist if the denominator is not 0 or -1.
    if (*Denominator != -1)
      return true;
    // At this point we know that the denominator is -1. It is safe to hoist as
    // long we know that the numerator is not INT_MIN.
    if (match(Inst->getOperand(0), m_APInt(Numerator)))
      return !Numerator->isMinSignedValue();
    // The numerator *might* be MinSignedValue.
    return false;
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(Inst);
    if (!LI->isUnordered() ||
        // Speculative load may create a race that did not exist in the source.
        LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
        // Speculative load may load data from dirty regions.
        LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress))
      return false;
    const DataLayout &DL = LI->getModule()->getDataLayout();
    return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
                                              LI->getAlignment(), DL, CtxI, DT);
  }
  case Instruction::Call: {
    auto *CI = cast<const CallInst>(Inst);
    const Function *Callee = CI->getCalledFunction();

    // The called function could have undefined behavior or side-effects, even
    // if marked readnone nounwind.
    return Callee && Callee->isSpeculatable();
  }
  case Instruction::VAArg:
  case Instruction::Alloca:
  case Instruction::Invoke:
  case Instruction::PHI:
  case Instruction::Store:
  case Instruction::Ret:
  case Instruction::Br:
  case Instruction::IndirectBr:
  case Instruction::Switch:
  case Instruction::Unreachable:
  case Instruction::Fence:
  case Instruction::AtomicRMW:
  case Instruction::AtomicCmpXchg:
  case Instruction::LandingPad:
  case Instruction::Resume:
  case Instruction::CatchSwitch:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
  case Instruction::CleanupPad:
  case Instruction::CleanupRet:
    return false; // Misc instructions which have effects
  }
}
3379 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3380 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3383 /// Return true if we know that the specified value is never null.
3384 bool llvm::isKnownNonNull(const Value *V) {
3385 assert(V->getType()->isPointerTy() && "V must be pointer type");
3387 // Alloca never returns null, malloc might.
3388 if (isa<AllocaInst>(V)) return true;
3390 // A byval, inalloca, or nonnull argument is never null.
3391 if (const Argument *A = dyn_cast<Argument>(V))
3392 return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
  // A global variable in address space 0 is non-null unless extern weak
3395 // or an absolute symbol reference. Other address spaces may have null as a
3396 // valid address for a global, so we can't assume anything.
3397 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
3398 return !GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
3399 GV->getType()->getAddressSpace() == 0;
3401 // A Load tagged with nonnull metadata is never null.
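  // Illustrative IR (hypothetical):
  //   %p = load i8*, i8** %q, !nonnull !0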
3402 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
3403 return LI->getMetadata(LLVMContext::MD_nonnull);
3405 if (auto CS = ImmutableCallSite(V))
3406 if (CS.isReturnNonNull())
3412 static bool isKnownNonNullFromDominatingCondition(const Value *V,
3413 const Instruction *CtxI,
3414 const DominatorTree *DT) {
3415 assert(V->getType()->isPointerTy() && "V must be pointer type");
3416 assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
3417 assert(CtxI && "Context instruction required for analysis");
3418 assert(DT && "Dominator tree required for analysis");
3420 unsigned NumUsesExplored = 0;
3421 for (auto *U : V->users()) {
3422 // Avoid massive lists
3423 if (NumUsesExplored >= DomConditionsMaxUses)
3427 // If the value is used as an argument to a call or invoke, then argument
3428 // attributes may provide an answer about null-ness.
3429 if (auto CS = ImmutableCallSite(U))
3430 if (auto *CalledFunc = CS.getCalledFunction())
3431 for (const Argument &Arg : CalledFunc->args())
3432 if (CS.getArgOperand(Arg.getArgNo()) == V &&
3433 Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
3436 // Consider only compare instructions uniquely controlling a branch
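    // Illustrative IR (hypothetical) for the pattern handled below:
    //   %null_check = icmp eq i8* %p, null
    //   br i1 %null_check, label %is_null, label %not_null
    // Along the edge to %not_null, %p is known to be non-null.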
3437 CmpInst::Predicate Pred;
3438 if (!match(const_cast<User *>(U),
3439 m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
3440 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
3443 for (auto *CmpU : U->users()) {
3444 if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
3445 assert(BI->isConditional() && "uses a comparison!");
3447 BasicBlock *NonNullSuccessor =
3448 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
3449 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
3450 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
3452 } else if (Pred == ICmpInst::ICMP_NE &&
3453 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
3454 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
3463 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
3464 const DominatorTree *DT) {
3465 if (isa<ConstantPointerNull>(V) || isa<UndefValue>(V))
3468 if (isKnownNonNull(V))
3474 return ::isKnownNonNullFromDominatingCondition(V, CtxI, DT);
3477 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
3479 const DataLayout &DL,
3480 AssumptionCache *AC,
3481 const Instruction *CxtI,
3482 const DominatorTree *DT) {
  // Multiplying a value with n significant bits by one with m significant
  // bits yields a result with at most n + m significant bits. If n + m does
  // not exceed the result bit width, there is no overflow.
  // This means that if the operands have enough leading zero bits, we can
  // guarantee that the result does not overflow.
3488 // Ref: "Hacker's Delight" by Henry Warren
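  // Worked example (illustrative): for an i8 multiply, if LHS is known to
  // have at least 5 leading zeros (LHS <= 7) and RHS at least 3 (RHS <= 31),
  // then ZeroBits == 8 >= BitWidth and the product is at most 217, which
  // cannot wrap.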
3489 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3490 KnownBits LHSKnown(BitWidth);
3491 KnownBits RHSKnown(BitWidth);
3492 computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3493 computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3494 // Note that underestimating the number of zero bits gives a more
3495 // conservative answer.
3496 unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
3497 RHSKnown.countMinLeadingZeros();
3498 // First handle the easy case: if we have enough zero bits there's
3499 // definitely no overflow.
3500 if (ZeroBits >= BitWidth)
3501 return OverflowResult::NeverOverflows;
3503 // Get the largest possible values for each operand.
3504 APInt LHSMax = ~LHSKnown.Zero;
3505 APInt RHSMax = ~RHSKnown.Zero;
3507 // We know the multiply operation doesn't overflow if the maximum values for
3508 // each operand will not overflow after we multiply them together.
3510 (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3512 return OverflowResult::NeverOverflows;
3514 // We know it always overflows if multiplying the smallest possible values for
3515 // the operands also results in overflow.
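  // Worked example (illustrative): for i8, if the top bit of each operand is
  // known one, both operands are >= 128, so even the minimum product
  // 128 * 128 == 16384 exceeds 255 and always wraps.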
3517 (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
3519 return OverflowResult::AlwaysOverflows;
3521 return OverflowResult::MayOverflow;
3524 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3526 const DataLayout &DL,
3527 AssumptionCache *AC,
3528 const Instruction *CxtI,
3529 const DominatorTree *DT) {
3530 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3531 if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3532 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
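    // Worked example (illustrative): for i8, two set sign bits mean both
    // operands are >= 128, so the sum is >= 256 and must wrap; two clear
    // sign bits mean both are <= 127, so the sum is <= 254 and cannot wrap.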
3534 if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
      // The sign bit is set in both cases: this MUST overflow.
3537 return OverflowResult::AlwaysOverflows;
3540 if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
      // The sign bit is clear in both cases: this CANNOT overflow.
3543 return OverflowResult::NeverOverflows;
3547 return OverflowResult::MayOverflow;
/// \brief Return true if we can prove that adding two values with the given
/// KnownBits cannot result in signed overflow.
/// Otherwise return false.
3553 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3554 const KnownBits &RHSKnown) {
3555 // Addition of two 2's complement numbers having opposite signs will never
3557 if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3558 (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3561 // If either of the values is known to be non-negative, adding them can only
3562 // overflow if the second is also non-negative, so we can assume that.
  // Two non-negative numbers will only overflow if there is a carry into the
  // sign bit, so we can check that even when the values are as big as
  // possible there is no carry into the sign bit.
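  // Worked example (illustrative): for i8, if both operands are known
  // non-negative with bit 6 also known zero, MaxLHS == MaxRHS == 0x3F and
  // 0x3F + 0x3F == 0x7E leaves the sign bit clear, so no signed overflow.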
3566 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3567 APInt MaxLHS = ~LHSKnown.Zero;
3568 MaxLHS.clearSignBit();
3569 APInt MaxRHS = ~RHSKnown.Zero;
3570 MaxRHS.clearSignBit();
3571 APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3572 return Result.isSignBitClear();
3575 // If either of the values is known to be negative, adding them can only
3576 // overflow if the second is also negative, so we can assume that.
  // Two negative numbers will only overflow if there is no carry into the
  // sign bit, so we can check that even when the values are as small as
  // possible there is a carry into the sign bit.
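  // Worked example (illustrative): for i8, if both operands are known
  // negative with bit 6 also known one (i.e. >= -64), MinLHS == MinRHS ==
  // 0xC0; clearing the sign bit gives 0x40, and 0x40 + 0x40 == 0x80 sets the
  // sign bit, so the sum stays >= -128 and cannot overflow.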
3580 if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3581 APInt MinLHS = LHSKnown.One;
3582 MinLHS.clearSignBit();
3583 APInt MinRHS = RHSKnown.One;
3584 MinRHS.clearSignBit();
3585 APInt Result = std::move(MinLHS) + std::move(MinRHS);
3586 return Result.isSignBitSet();
3589 // If we reached here it means that we know nothing about the sign bits.
3590 // In this case we can't know if there will be an overflow, since by
3591 // changing the sign bits any two values can be made to overflow.
3595 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3597 const AddOperator *Add,
3598 const DataLayout &DL,
3599 AssumptionCache *AC,
3600 const Instruction *CxtI,
3601 const DominatorTree *DT) {
3602 if (Add && Add->hasNoSignedWrap()) {
3603 return OverflowResult::NeverOverflows;
3606 // If LHS and RHS each have at least two sign bits, the addition will look
3612 // If the carry into the most significant position is 0, X and Y can't both
3613 // be 1 and therefore the carry out of the addition is also 0.
3615 // If the carry into the most significant position is 1, X and Y can't both
3616 // be 0 and therefore the carry out of the addition is also 1.
3618 // Since the carry into the most significant position is always equal to
3619 // the carry out of the addition, there is no signed overflow.
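  // Worked example (illustrative): i8 values with at least two sign bits lie
  // in [-64, 63], so any sum lies in [-128, 126] and fits in i8.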
3620 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3621 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3622 return OverflowResult::NeverOverflows;
3624 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3625 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3627 if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3628 return OverflowResult::NeverOverflows;
  // The remaining code needs Add to be available. Return early if it is not.
3632 return OverflowResult::MayOverflow;
3634 // If the sign of Add is the same as at least one of the operands, this add
3635 // CANNOT overflow. This is particularly useful when the sum is
3636 // @llvm.assume'ed non-negative rather than proved so from analyzing its
3638 bool LHSOrRHSKnownNonNegative =
3639 (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
3640 bool LHSOrRHSKnownNegative =
3641 (LHSKnown.isNegative() || RHSKnown.isNegative());
3642 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3643 KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
3644 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
3645 (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
3646 return OverflowResult::NeverOverflows;
3650 return OverflowResult::MayOverflow;
3653 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3654 const DominatorTree &DT) {
3656 auto IID = II->getIntrinsicID();
3657 assert((IID == Intrinsic::sadd_with_overflow ||
3658 IID == Intrinsic::uadd_with_overflow ||
3659 IID == Intrinsic::ssub_with_overflow ||
3660 IID == Intrinsic::usub_with_overflow ||
3661 IID == Intrinsic::smul_with_overflow ||
3662 IID == Intrinsic::umul_with_overflow) &&
3663 "Not an overflow intrinsic!");
3666 SmallVector<const BranchInst *, 2> GuardingBranches;
3667 SmallVector<const ExtractValueInst *, 2> Results;
3669 for (const User *U : II->users()) {
3670 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3671 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3673 if (EVI->getIndices()[0] == 0)
3674 Results.push_back(EVI);
3676 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3678 for (const auto *U : EVI->users())
3679 if (const auto *B = dyn_cast<BranchInst>(U)) {
3680 assert(B->isConditional() && "How else is it using an i1?");
3681 GuardingBranches.push_back(B);
3685 // We are using the aggregate directly in a way we don't want to analyze
3686 // here (storing it to a global, say).
3691 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3692 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3693 if (!NoWrapEdge.isSingleEdge())
3696 // Check if all users of the add are provably no-wrap.
3697 for (const auto *Result : Results) {
      // If the extractvalue itself is not executed on overflow, then we don't
3699 // need to check each use separately, since domination is transitive.
3700 if (DT.dominates(NoWrapEdge, Result->getParent()))
3703 for (auto &RU : Result->uses())
3704 if (!DT.dominates(NoWrapEdge, RU))
3711 return any_of(GuardingBranches, AllUsesGuardedByBranch);
3715 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
3716 const DataLayout &DL,
3717 AssumptionCache *AC,
3718 const Instruction *CxtI,
3719 const DominatorTree *DT) {
3720 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3721 Add, DL, AC, CxtI, DT);
3724 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
3726 const DataLayout &DL,
3727 AssumptionCache *AC,
3728 const Instruction *CxtI,
3729 const DominatorTree *DT) {
3730 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3733 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3734 // A memory operation returns normally if it isn't volatile. A volatile
3735 // operation is allowed to trap.
3737 // An atomic operation isn't guaranteed to return in a reasonable amount of
3738 // time because it's possible for another thread to interfere with it for an
3739 // arbitrary length of time, but programs aren't allowed to rely on that.
3740 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
3741 return !LI->isVolatile();
3742 if (const StoreInst *SI = dyn_cast<StoreInst>(I))
3743 return !SI->isVolatile();
3744 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
3745 return !CXI->isVolatile();
3746 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
3747 return !RMWI->isVolatile();
3748 if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
3749 return !MII->isVolatile();
3751 // If there is no successor, then execution can't transfer to it.
3752 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
3753 return !CRI->unwindsToCaller();
3754 if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
3755 return !CatchSwitch->unwindsToCaller();
3756 if (isa<ResumeInst>(I))
3758 if (isa<ReturnInst>(I))
3760 if (isa<UnreachableInst>(I))
3763 // Calls can throw, or contain an infinite loop, or kill the process.
3764 if (auto CS = ImmutableCallSite(I)) {
3765 // Call sites that throw have implicit non-local control flow.
3766 if (!CS.doesNotThrow())
3769 // Non-throwing call sites can loop infinitely, call exit/pthread_exit
3770 // etc. and thus not return. However, LLVM already assumes that
3772 // - Thread exiting actions are modeled as writes to memory invisible to
3775 // - Loops that don't have side effects (side effects are volatile/atomic
3776 // stores and IO) always terminate (see http://llvm.org/PR965).
3777 // Furthermore IO itself is also modeled as writes to memory invisible to
3780 // We rely on those assumptions here, and use the memory effects of the call
3781 // target as a proxy for checking that it always returns.
3783 // FIXME: This isn't aggressive enough; a call which only writes to a global
3784 // is guaranteed to return.
3785 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
3786 match(I, m_Intrinsic<Intrinsic::assume>());
3789 // Other instructions return normally.
3793 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3795 // The loop header is guaranteed to be executed for every iteration.
3797 // FIXME: Relax this constraint to cover all basic blocks that are
3798 // guaranteed to be executed at every iteration.
3799 if (I->getParent() != L->getHeader()) return false;
3801 for (const Instruction &LI : *L->getHeader()) {
3802 if (&LI == I) return true;
3803 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3805 llvm_unreachable("Instruction not contained in its own parent basic block.");
3808 bool llvm::propagatesFullPoison(const Instruction *I) {
3809 switch (I->getOpcode()) {
3810 case Instruction::Add:
3811 case Instruction::Sub:
3812 case Instruction::Xor:
3813 case Instruction::Trunc:
3814 case Instruction::BitCast:
3815 case Instruction::AddrSpaceCast:
3816 case Instruction::Mul:
3817 case Instruction::Shl:
3818 case Instruction::GetElementPtr:
3819 // These operations all propagate poison unconditionally. Note that poison
3820 // is not any particular value, so xor or subtraction of poison with
3821 // itself still yields poison, not zero.
3824 case Instruction::AShr:
3825 case Instruction::SExt:
3826 // For these operations, one bit of the input is replicated across
3827 // multiple output bits. A replicated poison bit is still poison.
3830 case Instruction::ICmp:
3831 // Comparing poison with any value yields poison. This is why, for
3832 // instance, x s< (x +nsw 1) can be folded to true.
3840 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3841 switch (I->getOpcode()) {
3842 case Instruction::Store:
3843 return cast<StoreInst>(I)->getPointerOperand();
3845 case Instruction::Load:
3846 return cast<LoadInst>(I)->getPointerOperand();
3848 case Instruction::AtomicCmpXchg:
3849 return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3851 case Instruction::AtomicRMW:
3852 return cast<AtomicRMWInst>(I)->getPointerOperand();
3854 case Instruction::UDiv:
3855 case Instruction::SDiv:
3856 case Instruction::URem:
3857 case Instruction::SRem:
3858 return I->getOperand(1);
3865 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
3866 // We currently only look for uses of poison values within the same basic
3867 // block, as that makes it easier to guarantee that the uses will be
3868 // executed given that PoisonI is executed.
3870 // FIXME: Expand this to consider uses beyond the same basic block. To do
3871 // this, look out for the distinction between post-dominance and strong
3873 const BasicBlock *BB = PoisonI->getParent();
3875 // Set of instructions that we have proved will yield poison if PoisonI
3877 SmallSet<const Value *, 16> YieldsPoison;
3878 SmallSet<const BasicBlock *, 4> Visited;
3879 YieldsPoison.insert(PoisonI);
3880 Visited.insert(PoisonI->getParent());
3882 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
3885 while (Iter++ < MaxDepth) {
3886 for (auto &I : make_range(Begin, End)) {
3887 if (&I != PoisonI) {
3888 const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
3889 if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
3891 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
3895 // Mark poison that propagates from I through uses of I.
3896 if (YieldsPoison.count(&I)) {
3897 for (const User *User : I.users()) {
3898 const Instruction *UserI = cast<Instruction>(User);
3899 if (propagatesFullPoison(UserI))
3900 YieldsPoison.insert(User);
3905 if (auto *NextBB = BB->getSingleSuccessor()) {
3906 if (Visited.insert(NextBB).second) {
3908 Begin = BB->getFirstNonPHI()->getIterator();
3919 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
3923 if (auto *C = dyn_cast<ConstantFP>(V))
3928 static bool isKnownNonZero(const Value *V) {
3929 if (auto *C = dyn_cast<ConstantFP>(V))
3930 return !C->isZero();
3934 /// Match non-obvious integer minimum and maximum sequences.
3935 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
3936 Value *CmpLHS, Value *CmpRHS,
3937 Value *TrueVal, Value *FalseVal,
3938 Value *&LHS, Value *&RHS) {
3939 // Assume success. If there's no match, callers should not use these anyway.
3943 // Recognize variations of:
3944 // CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
3946 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
3949 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
3950 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
3951 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
3952 return {SPF_SMAX, SPNB_NA, false};
3954 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
3955 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
3956 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
3957 return {SPF_SMIN, SPNB_NA, false};
3959 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
3960 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
3961 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
3962 return {SPF_UMAX, SPNB_NA, false};
3964 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
3965 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
3966 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
3967 return {SPF_UMIN, SPNB_NA, false};
3970 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
3971 return {SPF_UNKNOWN, SPNB_NA, false};
3974 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
3975 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
3976 if (match(TrueVal, m_Zero()) &&
3977 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
3978 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
3981 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
3982 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
3983 if (match(FalseVal, m_Zero()) &&
3984 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
3985 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
3987 if (!match(CmpRHS, m_APInt(C1)))
3988 return {SPF_UNKNOWN, SPNB_NA, false};
3990 // An unsigned min/max can be written with a signed compare.
3992 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
3993 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
3994 // Is the sign bit set?
3995 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
3996 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
3997 if (Pred == CmpInst::ICMP_SLT && *C1 == 0 && C2->isMaxSignedValue())
3998 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4000 // Is the sign bit clear?
4001 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4002 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4003 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4004 C2->isMinSignedValue())
4005 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4008 // Look through 'not' ops to find disguised signed min/max.
4009 // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4010 // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4011 if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4012 match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4013 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4015 // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4016 // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4017 if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4018 match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4019 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4021 return {SPF_UNKNOWN, SPNB_NA, false};
4024 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4026 Value *CmpLHS, Value *CmpRHS,
4027 Value *TrueVal, Value *FalseVal,
4028 Value *&LHS, Value *&RHS) {
  // If the predicate is an "or-equal" (FP) predicate, then a min/max over
  // signed zeroes may return inconsistent results between implementations.
4034 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4035 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4036 // Therefore we behave conservatively and only proceed if at least one of the
4037 // operands is known to not be zero, or if we don't care about signed zeroes.
4040 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4041 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4042 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4043 !isKnownNonZero(CmpRHS))
4044 return {SPF_UNKNOWN, SPNB_NA, false};
4047 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4048 bool Ordered = false;
4050 // When given one NaN and one non-NaN input:
4051 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4052 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4053 // ordered comparison fails), which could be NaN or non-NaN.
4054 // so here we discover exactly what NaN behavior is required/accepted.
4055 if (CmpInst::isFPPredicate(Pred)) {
4056 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4057 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4059 if (LHSSafe && RHSSafe) {
4060 // Both operands are known non-NaN.
4061 NaNBehavior = SPNB_RETURNS_ANY;
4062 } else if (CmpInst::isOrdered(Pred)) {
4063 // An ordered comparison will return false when given a NaN, so it
4067 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4068 NaNBehavior = SPNB_RETURNS_NAN;
4070 NaNBehavior = SPNB_RETURNS_OTHER;
4072 // Completely unsafe.
4073 return {SPF_UNKNOWN, SPNB_NA, false};
4076 // An unordered comparison will return true when given a NaN, so it
4079 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4080 NaNBehavior = SPNB_RETURNS_OTHER;
4082 NaNBehavior = SPNB_RETURNS_NAN;
4084 // Completely unsafe.
4085 return {SPF_UNKNOWN, SPNB_NA, false};
4089 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4090 std::swap(CmpLHS, CmpRHS);
4091 Pred = CmpInst::getSwappedPredicate(Pred);
4092 if (NaNBehavior == SPNB_RETURNS_NAN)
4093 NaNBehavior = SPNB_RETURNS_OTHER;
4094 else if (NaNBehavior == SPNB_RETURNS_OTHER)
4095 NaNBehavior = SPNB_RETURNS_NAN;
4099 // ([if]cmp X, Y) ? X : Y
4100 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4102 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4103 case ICmpInst::ICMP_UGT:
4104 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4105 case ICmpInst::ICMP_SGT:
4106 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4107 case ICmpInst::ICMP_ULT:
4108 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4109 case ICmpInst::ICMP_SLT:
4110 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4111 case FCmpInst::FCMP_UGT:
4112 case FCmpInst::FCMP_UGE:
4113 case FCmpInst::FCMP_OGT:
4114 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4115 case FCmpInst::FCMP_ULT:
4116 case FCmpInst::FCMP_ULE:
4117 case FCmpInst::FCMP_OLT:
4118 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4123 if (match(CmpRHS, m_APInt(C1))) {
4124 if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
4125 (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
4127 // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
4128 // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
4129 if (Pred == ICmpInst::ICMP_SGT && (*C1 == 0 || C1->isAllOnesValue())) {
4130 return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4133 // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
4134 // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
4135 if (Pred == ICmpInst::ICMP_SLT && (*C1 == 0 || *C1 == 1)) {
4136 return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4141 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4144 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4145 Instruction::CastOps *CastOp) {
4146 auto *Cast1 = dyn_cast<CastInst>(V1);
4150 *CastOp = Cast1->getOpcode();
4151 Type *SrcTy = Cast1->getSrcTy();
4152 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4153 // If V1 and V2 are both the same cast from the same type, look through V1.
4154 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4155 return Cast2->getOperand(0);
4159 auto *C = dyn_cast<Constant>(V2);
4163 Constant *CastedTo = nullptr;
4165 case Instruction::ZExt:
4166 if (CmpI->isUnsigned())
4167 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4169 case Instruction::SExt:
4170 if (CmpI->isSigned())
4171 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4173 case Instruction::Trunc:
4174 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
4176 case Instruction::FPTrunc:
4177 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
4179 case Instruction::FPExt:
4180 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
4182 case Instruction::FPToUI:
4183 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
4185 case Instruction::FPToSI:
4186 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
4188 case Instruction::UIToFP:
4189 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
4191 case Instruction::SIToFP:
4192 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
4201 // Make sure the cast doesn't lose any information.
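  // Worked example (illustrative): when the cast is (zext i8 %x to i32) and
  // C is i32 300, the candidate trunc gives i8 44, and zext'ing 44 back
  // yields 44 != 300, so the cast loses information and we give up.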
4202 Constant *CastedBack =
4203 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
4204 if (CastedBack != C)
4210 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
4211 Instruction::CastOps *CastOp) {
4212 SelectInst *SI = dyn_cast<SelectInst>(V);
4213 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
4215 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
4216 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
4218 CmpInst::Predicate Pred = CmpI->getPredicate();
4219 Value *CmpLHS = CmpI->getOperand(0);
4220 Value *CmpRHS = CmpI->getOperand(1);
4221 Value *TrueVal = SI->getTrueValue();
4222 Value *FalseVal = SI->getFalseValue();
4224 if (isa<FPMathOperator>(CmpI))
4225 FMF = CmpI->getFastMathFlags();
4228 if (CmpI->isEquality())
4229 return {SPF_UNKNOWN, SPNB_NA, false};
4231 // Deal with type mismatches.
4232 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
4233 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
4234 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4235 cast<CastInst>(TrueVal)->getOperand(0), C,
4237 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
4238 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4239 C, cast<CastInst>(FalseVal)->getOperand(0),
4242 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
4246 /// Return true if "icmp Pred LHS RHS" is always true.
4247 static bool isTruePredicate(CmpInst::Predicate Pred,
4248 const Value *LHS, const Value *RHS,
4249 const DataLayout &DL, unsigned Depth,
4250 AssumptionCache *AC, const Instruction *CxtI,
4251 const DominatorTree *DT) {
4252 assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
4253 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
4260 case CmpInst::ICMP_SLE: {
4263 // LHS s<= LHS +_{nsw} C if C >= 0
4264 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
4265 return !C->isNegative();
4269 case CmpInst::ICMP_ULE: {
4272 // LHS u<= LHS +_{nuw} C for any C
4273 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
4276 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
4277 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
4279 const APInt *&CA, const APInt *&CB) {
4280 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
4281 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
4284 // If X & C == 0 then (X | C) == X +_{nuw} C
4285 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
4286 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
4287 KnownBits Known(CA->getBitWidth());
4288 computeKnownBits(X, Known, DL, Depth + 1, AC, CxtI, DT);
4290 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
4298 const APInt *CLHS, *CRHS;
4299 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
4300 return CLHS->ule(*CRHS);
4307 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
4308 /// ALHS ARHS" is true. Otherwise, return None.
4309 static Optional<bool>
4310 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
4311 const Value *ARHS, const Value *BLHS,
4312 const Value *BRHS, const DataLayout &DL,
4313 unsigned Depth, AssumptionCache *AC,
4314 const Instruction *CxtI, const DominatorTree *DT) {
4319 case CmpInst::ICMP_SLT:
4320 case CmpInst::ICMP_SLE:
4321 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
4323 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
4327 case CmpInst::ICMP_ULT:
4328 case CmpInst::ICMP_ULE:
4329 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
4331 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
4337 /// Return true if the operands of the two compares match. IsSwappedOps is true
4338 /// when the operands match, but are swapped.
4339 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
4340 const Value *BLHS, const Value *BRHS,
4341 bool &IsSwappedOps) {
4343 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
4344 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
4345 return IsMatchingOps || IsSwappedOps;
4348 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
4349 /// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
4350 /// BRHS" is false. Otherwise, return None if we can't infer anything.
4351 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
4354 CmpInst::Predicate BPred,
4357 bool IsSwappedOps) {
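  // Worked example (illustrative): with matching operands, (icmp sgt %a, %b)
  // being true implies (icmp sge %a, %b) is true and (icmp slt %a, %b) is
  // false.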
4358 // Canonicalize the operands so they're matching.
4360 std::swap(BLHS, BRHS);
4361 BPred = ICmpInst::getSwappedPredicate(BPred);
4363 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
4365 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
4371 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
4372 /// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
4373 /// C2" is false. Otherwise, return None if we can't infer anything.
4374 static Optional<bool>
4375 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
4376 const ConstantInt *C1,
4377 CmpInst::Predicate BPred,
4378 const Value *BLHS, const ConstantInt *C2) {
4379 assert(ALHS == BLHS && "LHS operands must match.");
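  // Worked example (illustrative): APred/C1 == (ult 5) gives the exact
  // region [0, 5), and BPred/C2 == (ult 10) allows [0, 10). The difference
  // is empty, so the first compare being true implies the second.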
4380 ConstantRange DomCR =
4381 ConstantRange::makeExactICmpRegion(APred, C1->getValue());
4383 ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
4384 ConstantRange Intersection = DomCR.intersectWith(CR);
4385 ConstantRange Difference = DomCR.difference(CR);
4386 if (Intersection.isEmptySet())
4388 if (Difference.isEmptySet())
4393 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
4394 const DataLayout &DL, bool LHSIsFalse,
4395 unsigned Depth, AssumptionCache *AC,
4396 const Instruction *CxtI,
4397 const DominatorTree *DT) {
4398 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for example.
4399 if (LHS->getType() != RHS->getType())
4402 Type *OpTy = LHS->getType();
4403 assert(OpTy->isIntOrIntVectorTy(1));
4405 // LHS ==> RHS by definition
4409 if (OpTy->isVectorTy())
4410 // TODO: extending the code below to handle vectors
4412 assert(OpTy->isIntegerTy(1) && "implied by above");
4415 ICmpInst::Predicate BPred;
4416 // We expect the RHS to be an icmp.
4417 if (!match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
4421 ICmpInst::Predicate APred;
4422 // The LHS can be an 'or', 'and', or 'icmp'.
4423 if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS)))) {
4424 // The remaining tests are all recursive, so bail out if we hit the limit.
4425 if (Depth == MaxDepth)
4427 // If the result of an 'or' is false, then we know both legs of the 'or' are
4428 // false. Similarly, if the result of an 'and' is true, then we know both
4429 // legs of the 'and' are true.
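    // Worked example (illustrative): if LHS is (or i1 %a, %b) and LHS is
    // false, both %a and %b are false, so it suffices for either leg to
    // imply RHS.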
4430 if ((LHSIsFalse && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
4431 (!LHSIsFalse && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
4432 if (Optional<bool> Implication = isImpliedCondition(
4433 ALHS, RHS, DL, LHSIsFalse, Depth + 1, AC, CxtI, DT))
4435 if (Optional<bool> Implication = isImpliedCondition(
4436 ARHS, RHS, DL, LHSIsFalse, Depth + 1, AC, CxtI, DT))
4442 // All of the below logic assumes both LHS and RHS are icmps.
4443 assert(isa<ICmpInst>(LHS) && isa<ICmpInst>(RHS) && "Expected icmps.");
4445 // The rest of the logic assumes the LHS condition is true. If that's not the
4446 // case, invert the predicate to make it so.
4448 APred = CmpInst::getInversePredicate(APred);
4450 // Can we infer anything when the two compares have matching operands?
4452 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
4453 if (Optional<bool> Implication = isImpliedCondMatchingOperands(
4454 APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
4456 // No amount of additional analysis will infer the second condition, so
4461 // Can we infer anything when the LHS operands match and the RHS operands are
4462 // constants (not necessarily matching)?
4463 if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
4464 if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
4465 APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
4466 cast<ConstantInt>(BRHS)))
4468 // No amount of additional analysis will infer the second condition, so
4474 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,