//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
using namespace llvm;
using namespace llvm::PatternMatch;
const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

// This optimization is known to cause performance regressions in some cases,
// so keep it under a temporary flag for now.
static cl::opt<bool>
DontImproveNonNegativePhiBits("dont-improve-non-negative-phi-bits",
                              cl::Hidden, cl::init(true));
/// Returns the bitwidth of the given scalar or pointer type (if unknown
/// returns 0). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}
namespace {
// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;
  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
  /// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so
  /// on.
  std::array<const Value *, MaxDepth> Excluded;
  unsigned NumExcluded;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), NumExcluded(0) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};
} // end anonymous namespace
// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}
static void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE) {
  ::computeKnownBits(V, KnownZero, KnownOne, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}
bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
  APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
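  // Illustrative example (not from the original source): for i8 values
  // LHS = (x & 0xF0) and RHS = (y & 0x0F), LHSKnownZero = 0x0F and
  // RHSKnownZero = 0xF0; their union is all-ones, so no bit position can be
  // set in both values and, e.g., LHS + RHS can be rewritten as LHS | RHS.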
  return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
}
static void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
                           unsigned Depth, const Query &Q);

void llvm::ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
                          const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  ::ComputeSignBit(V, KnownZero, KnownOne, Depth,
                   Query(DL, AC, safeCxtI(V, CxtI), DT));
}
static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
}
static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  bool NonNegative, Negative;
  ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
  return NonNegative;
}
bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}
bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  bool NonNegative, Negative;
  ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
  return Negative;
}
static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
                                         DT));
}
static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             Query(DL, AC, safeCxtI(V, CxtI), DT));
}
static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   APInt &KnownZero, APInt &KnownOne,
                                   APInt &KnownZero2, APInt &KnownOne2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, Depth + 1, Q);
  computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);

  // Carry in a 1 for a subtract, rather than a 0.
  uint64_t CarryIn = 0;
  if (!Add) {
    // Sum = LHS + ~RHS + 1
    std::swap(KnownZero2, KnownOne2);
    CarryIn = 1;
  }

  APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
  APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;

  // Compute known bits of the carry.
  APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
  APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;

  // Compute set of known bits (where all three relevant bits are known).
  APInt LHSKnown = LHSKnownZero | LHSKnownOne;
  APInt RHSKnown = KnownZero2 | KnownOne2;
  APInt CarryKnown = CarryKnownZero | CarryKnownOne;
  APInt Known = LHSKnown & RHSKnown & CarryKnown;

  assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
         "known bits of sum differ");
  // Compute known bits of the result.
  KnownZero = ~PossibleSumOne & Known;
  KnownOne = PossibleSumOne & Known;

  // Are we still trying to solve for the sign bit?
  if (!Known.isNegative()) {
    if (NSW) {
      // Adding two non-negative numbers, or subtracting a negative number from
      // a non-negative one, can't wrap into negative.
      if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
        KnownZero.setSignBit();
      // Adding two negative numbers, or subtracting a non-negative number from
      // a negative one, can't wrap into non-negative.
      else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
        KnownOne.setSignBit();
    }
  }
}
static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                APInt &KnownZero, APInt &KnownOne,
                                APInt &KnownZero2, APInt &KnownOne2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();
  computeKnownBits(Op1, KnownZero, KnownOne, Depth + 1, Q);
  computeKnownBits(Op0, KnownZero2, KnownOne2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = KnownZero.isNegative();
      bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
      bool isKnownNegativeOp1 = KnownOne.isNegative();
      bool isKnownNegativeOp0 = KnownOne2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }
  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
  KnownOne.clearAllBits();
  unsigned TrailZ = KnownZero.countTrailingOnes() +
                    KnownZero2.countTrailingOnes();
  unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                            KnownZero2.countLeadingOnes(),
                            BitWidth) - BitWidth;
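  // Illustrative example (not from the original source): multiplying a value
  // with two known trailing zeros (a multiple of 4) by one with three known
  // trailing zeros (a multiple of 8) yields a multiple of 32, so
  // TrailZ = 2 + 3 = 5.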
  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);
  KnownZero.clearAllBits();
  KnownZero.setLowBits(TrailZ);
  KnownZero.setHighBits(LeadZ);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !KnownOne.isNegative())
    KnownZero.setSignBit();
  else if (isKnownNegative && !KnownZero.isNegative())
    KnownOne.setSignBit();
}
void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             APInt &KnownZero,
                                             APInt &KnownOne) {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  KnownZero.setAllBits();
  KnownOne.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
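    // Illustrative example (not from the original source): for
    // !range {i8 32, i8 64} the range is [32, 63]; 32 = 0b00100000 and
    // 63 = 0b00111111 differ only in the low five bits, so
    // CommonPrefixBits = 3 and every value matches 0b001xxxxx.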
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    KnownOne &= Range.getUnsignedMax() & Mask;
    KnownZero &= ~Range.getUnsignedMax() & Mask;
  }
}
static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (all_of(V->users(), [&](const User *U) { return EphValues.count(U); })) {
      if (V == E)
        return true;

      EphValues.insert(V);
      if (const User *U = dyn_cast<User>(V))
        for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
             J != JE; ++J)
          if (isSafeToSpeculativelyExecute(*J))
            WorkSet.push_back(*J);
    }
  }

  return false;
}
// Is this an intrinsic that cannot be speculated but also cannot trap?
static bool isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}
bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
         IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
         std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}
static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
                                       APInt &KnownOne, unsigned Depth,
                                       const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = KnownZero.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We run this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");
    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      KnownZero.clearAllBits();
      KnownOne.setAllBits();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      KnownZero.setAllBits();
      KnownOne.clearAllBits();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                           m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    ConstantInt *C;
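    // Illustrative example (not from the original source): given
    //   %cmp = icmp eq i32 %x, 8
    //   call void @llvm.assume(i1 %cmp)
    // the assume(v = a) pattern below makes every bit of %x known in code the
    // assume is valid for: KnownOne = 8 and KnownZero = ~8.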
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      KnownZero |= RHSKnownZero;
      KnownOne  |= RHSKnownOne;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      KnownZero |= RHSKnownZero & MaskKnownOne;
      KnownOne  |= RHSKnownOne  & MaskKnownOne;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne  & MaskKnownOne;
      KnownOne  |= RHSKnownZero & MaskKnownOne;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne  |= RHSKnownOne  & BKnownZero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne  & BKnownZero;
      KnownOne  |= RHSKnownZero & BKnownZero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne  |= RHSKnownOne  & BKnownZero;
      KnownZero |= RHSKnownOne  & BKnownOne;
      KnownOne  |= RHSKnownZero & BKnownOne;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      KnownZero |= RHSKnownOne  & BKnownZero;
      KnownOne  |= RHSKnownZero & BKnownZero;
      KnownZero |= RHSKnownZero & BKnownOne;
      KnownOne  |= RHSKnownOne  & BKnownOne;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
      KnownOne  |= RHSKnownOne.lshr(C->getZExtValue());
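      // Illustrative example (not from the original source): from
      // assume((i8 %v << 4) == 0xA0), a is fully known, so shifting its known
      // bits right by 4 makes v's low nibble known to be 0xA while the high
      // nibble (shifted out by the shl) stays unknown.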
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
      KnownOne  |= RHSKnownZero.lshr(C->getZExtValue());
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
                                                m_AShr(m_V, m_ConstantInt(C))),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      KnownZero |= RHSKnownZero << C->getZExtValue();
      KnownOne  |= RHSKnownOne  << C->getZExtValue();
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
                                            m_LShr(m_V, m_ConstantInt(C)),
                                            m_AShr(m_V, m_ConstantInt(C)))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
      KnownZero |= RHSKnownOne  << C->getZExtValue();
      KnownOne  |= RHSKnownZero << C->getZExtValue();
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero.setSignBit();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero.setSignBit();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne.setSignBit();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne.setSignBit();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      KnownZero.setHighBits(RHSKnownZero.countLeadingOnes());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
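      // Illustrative example (not from the original source): from
      // assume(v <u 16) on an i8 value, c = 16 = 0b00010000 has three known
      // leading zeros and is a power of two, so v's top 3 + 1 = 4 bits are
      // known zero, i.e. v <= 15.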
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        KnownZero.setHighBits(RHSKnownZero.countLeadingOnes()+1);
      else
        KnownZero.setHighBits(RHSKnownZero.countLeadingOnes());
    }
  }
  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if ((KnownZero & KnownOne) != 0) {
    KnownZero.clearAllBits();
    KnownOne.clearAllBits();

    if (Q.ORE) {
      auto *CxtI = const_cast<Instruction *>(Q.CxtI);
      OptimizationRemarkAnalysis ORA("value-tracking", "BadAssumption", CxtI);
      Q.ORE->emit(ORA << "Detected conflicting code assumptions. Program may "
                         "have undefined behavior, or compiler may have "
                         "internal error.");
    }
  }
}
// Compute known bits from a shift operator, including those with a
// non-constant shift amount. KnownZero and KnownOne are the outputs of this
// function. KnownZero2 and KnownOne2 are pre-allocated temporaries with the
// same bit width as KnownZero and KnownOne. KZF and KOF are operator-specific
// functors that, given the known-zero or known-one bits respectively, and a
// shift amount, compute the implied known-zero or known-one bits of the shift
// operator's result respectively for that shift amount. The results from
// calling KZF and KOF are conservatively combined for all permitted shift
// amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, APInt &KnownZero, APInt &KnownOne, APInt &KnownZero2,
    APInt &KnownOne2, unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = KnownZero.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    KnownZero = KZF(KnownZero, ShiftAmt);
    KnownOne  = KOF(KnownOne, ShiftAmt);
    // If there is conflict between KnownZero and KnownOne, this must be an
    // overflowing left shift, so the shift result is undefined. Clear KnownZero
    // and KnownOne bits so that other code could propagate this undef.
    if ((KnownZero & KnownOne) != 0) {
      KnownZero.clearAllBits();
      KnownOne.clearAllBits();
    }

    return;
  }
  computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be undef, so we don't know anything about it.
  if ((~KnownZero).uge(BitWidth)) {
    KnownZero.clearAllBits();
    KnownOne.clearAllBits();
    return;
  }

  // Note: We cannot use KnownZero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = KnownZero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = KnownOne.zextOrTrunc(64).getZExtValue();

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  KnownZero.clearAllBits();
  KnownOne.clearAllBits();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
  KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    KnownZero &= KZF(KnownZero2, ShiftAmt);
    KnownOne  &= KOF(KnownOne2, ShiftAmt);
  }
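  // Illustrative example (not from the original source): for (shl i8 %x, %s)
  // where bit 0 of %s is known zero, only the even shift amounts 0, 2, 4, and
  // 6 pass the checks above, so the result keeps exactly those bits that agree
  // across all of those shifts.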
  // If there are no compatible shift amounts, then we've proven that the shift
  // amount must be >= the BitWidth, and the result is undefined. We could
  // return anything we'd like, but we need to make sure the sets of known bits
  // stay disjoint (it should be better for some other code to actually
  // propagate the undef than to pick a value here using known bits).
  if ((KnownZero & KnownOne) != 0) {
    KnownZero.clearAllBits();
    KnownOne.clearAllBits();
  }
}
static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
                                         APInt &KnownOne, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form and(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
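    // Illustrative example (not from the original source): for x = 6 (0b110),
    // and(x, add(x, -1)) = 6 & 5 = 0b110 & 0b101 = 0b100. Adding an odd y
    // always flips bit 0 of x, so the AND clears it.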
    Value *Y = nullptr;
    if (!KnownZero[0] && !KnownOne[0] &&
        (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
                                       m_Value(Y))) ||
         match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
                                       m_Value(Y))))) {
      KnownZero2.clearAllBits(); KnownOne2.clearAllBits();
      computeKnownBits(Y, KnownZero2, KnownOne2, Depth + 1, Q);
      if (KnownOne2.countTrailingOnes() > 0)
        KnownZero.setBit(0);
    }
    break;
  }
  case Instruction::Or: {
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  }
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
                        KnownOne, KnownZero2, KnownOne2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, KnownZero, KnownOne, Depth + 1, Q);
      computeKnownBits(LHS, KnownZero2, KnownOne2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (KnownOne.isNegative() && KnownOne2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (KnownZero.isNegative() || KnownZero2.isNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (KnownZero.isNegative() && KnownZero2.isNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
      // If either side is negative, the result is negative.
      else if (KnownOne.isNegative() || KnownOne2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(KnownZero.countLeadingOnes(), KnownZero2.countLeadingOnes());
    }

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    if (MaxHighOnes > 0)
      KnownOne.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      KnownZero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    KnownZero = KnownZero.zextOrTrunc(BitWidth);
    KnownOne = KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    KnownZero = KnownZero.sext(BitWidth);
    KnownOne = KnownOne.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isNegative())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isNegative())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, Depth, Q, KZF,
                                      KOF);
    break;
  }
  case Instruction::LShr: {
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.lshr(ShiftAmt) |
             // High bits known zero.
             APInt::getHighBitsSet(BitWidth, ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, Depth, Q, KZF,
                                      KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, Depth, Q, KZF,
                                      KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
                           Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
                           Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1,
                         Q);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2.isNegative() || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2.isNegative() && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;

        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (KnownZero.isNonNegative()) {
      APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
      computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
                       Q);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnownZero.isNegative())
        KnownZero.setSignBit();
    }

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
        KnownZero |= ~LowBits;
        KnownOne &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);

    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero.clearAllBits();
    KnownZero.setHighBits(Leaders);
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
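    // Illustrative example (not from the original source): a 16-byte-aligned
    // alloca produces a pointer whose low log2(16) = 4 bits are zero.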
    if (Align > 0)
      KnownZero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, Depth + 1,
                     Q);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnownZero, LocalKnownOne, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnownZero.countTrailingOnes()));
      }
    }

    KnownZero.setLowBits(TrailZ);
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, KnownZero2, KnownOne2, Depth + 1, Q);

          // We need to take the minimum number of known bits.
          APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
          computeKnownBits(L, KnownZero3, KnownOne3, Depth + 1, Q);

          KnownZero.setLowBits(std::min(KnownZero2.countTrailingOnes(),
                                        KnownZero3.countTrailingOnes()));

          if (DontImproveNonNegativePhiBits)
            break;

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (KnownZero2.isNegative() && KnownZero3.isNegative())
                KnownZero.setSignBit();
              else if (KnownOne2.isNegative() && KnownOne3.isNegative())
                KnownOne.setSignBit();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (KnownZero2.isNegative() && KnownOne3.isNegative())
                KnownZero.setSignBit();
              else if (KnownOne2.isNegative() && KnownZero3.isNegative())
                KnownOne.setSignBit();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && KnownZero2.isNegative() &&
                     KnownZero3.isNegative())
              KnownZero.setSignBit();
          }

          break;
        }
      }
    }
    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      // Skip if every incoming value references itself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      KnownZero.setAllBits();
      KnownOne.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        KnownZero2 = APInt(BitWidth, 0);
        KnownOne2 = APInt(BitWidth, 0);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, KnownZero2, KnownOne2, MaxDepth - 1, Q);
        KnownZero &= KnownZero2;
        KnownOne &= KnownOne2;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!KnownZero && !KnownOne)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, KnownZero2, KnownOne2, Depth + 1, Q);
      KnownZero |= KnownZero2;
      KnownOne |= KnownOne2;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
        KnownZero |= KnownZero2.reverseBits();
        KnownOne |= KnownOne2.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
        KnownZero |= KnownZero2.byteSwap();
        KnownOne |= KnownOne2.byteSwap();
        break;
      case Intrinsic::ctlz:
      case Intrinsic::cttz: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          LowBits -= 1;
        KnownZero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
        unsigned LowBits = Log2_32(BitsPossiblySet)+1;
        KnownZero.setBitsFrom(LowBits);
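        // Illustrative example (not from the original source): for an i8
        // operand with five bits known zero, at most three bits can be set, so
        // the count fits in Log2_32(3) + 1 = 2 low bits and the top six bits
        // of the result are known zero.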
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        KnownZero.setBitsFrom(32);
        break;
      }
    }
    break;
  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if vector is sign
    // extended, shifted, etc).
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, KnownZero,
                                 KnownOne, KnownZero2, KnownOne2, Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, KnownZero,
                                 KnownOne, KnownZero2, KnownOne2, Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
                              Q);
          break;
        }
      }
    }
  }
}
/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and a
/// bit is set only if it is true for all of the elements in the vector.
void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
                      unsigned Depth, const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = KnownZero.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarType()->isPointerTy()) &&
         "Not integer or pointer type!");
  assert((Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
         (!V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "V, KnownOne and KnownZero should have same BitWidth");
1514 if (match(V, m_APInt(C))) {
1515 // We know all of the bits for a scalar constant or a splat vector constant!
1517 KnownZero = ~KnownOne;
1520 // Null and aggregate-zero are all-zeros.
1521 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1522 KnownOne.clearAllBits();
1523     KnownZero.setAllBits();
1524     return;
1525   }
1526   // Handle a constant vector by taking the intersection of the known bits of
1527   // each element.
1528 if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1529     // We know that CDS must be a vector of integers. Take the intersection of
1530     // each element.
1531 KnownZero.setAllBits(); KnownOne.setAllBits();
1532 APInt Elt(KnownZero.getBitWidth(), 0);
1533     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1534       Elt = CDS->getElementAsInteger(i);
1535       KnownZero &= ~Elt;
1536       KnownOne &= Elt;
1537     }
1538     return;
1539   }
1541 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1542     // We know that CV must be a vector of integers. Take the intersection of
1543     // each element.
1544 KnownZero.setAllBits(); KnownOne.setAllBits();
1545 APInt Elt(KnownZero.getBitWidth(), 0);
1546 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1547 Constant *Element = CV->getAggregateElement(i);
1548       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1549       if (!ElementCI) {
1550         KnownZero.clearAllBits();
1551         KnownOne.clearAllBits();
1552         return;
1553       }
1554       Elt = ElementCI->getValue();
1555       KnownZero &= ~Elt;
1556       KnownOne &= Elt;
1557     }
1558     return;
1559   }
1561 // Start out not knowing anything.
1562 KnownZero.clearAllBits(); KnownOne.clearAllBits();
1564 // We can't imply anything about undefs.
1565   if (isa<UndefValue>(V))
1566     return;
1568 // There's no point in looking through other users of ConstantData for
1569 // assumptions. Confirm that we've handled them all.
1570 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1572 // Limit search depth.
1573 // All recursive calls that increase depth must come after this.
1574   if (Depth == MaxDepth)
1575     return;
1577 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1578 // the bits of its aliasee.
1579 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1580 if (!GA->isInterposable())
1581 computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, Depth + 1, Q);
1585 if (const Operator *I = dyn_cast<Operator>(V))
1586 computeKnownBitsFromOperator(I, KnownZero, KnownOne, Depth, Q);
1588 // Aligned pointers have trailing zeros - refine KnownZero set
1589 if (V->getType()->isPointerTy()) {
1590 unsigned Align = V->getPointerAlignment(Q.DL);
1591     if (Align)
1592       KnownZero.setLowBits(countTrailingZeros(Align));
1593   }
1595 // computeKnownBitsFromAssume strictly refines KnownZero and
1596 // KnownOne. Therefore, we run them after computeKnownBitsFromOperator.
1598 // Check whether a nearby assume intrinsic can determine some known bits.
1599 computeKnownBitsFromAssume(V, KnownZero, KnownOne, Depth, Q);
1601 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1604 /// Determine whether the sign bit is known to be zero or one.
1605 /// Convenience wrapper around computeKnownBits.
1606 void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
1607 unsigned Depth, const Query &Q) {
1608   unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
1609   if (!BitWidth) {
1610     KnownZero = false;
1611     KnownOne = false;
1612     return;
1613   }
1614 APInt ZeroBits(BitWidth, 0);
1615 APInt OneBits(BitWidth, 0);
1616 computeKnownBits(V, ZeroBits, OneBits, Depth, Q);
1617 KnownOne = OneBits.isNegative();
1618 KnownZero = ZeroBits.isNegative();
1621 /// Return true if the given value is known to have exactly one
1622 /// bit set when defined. For vectors return true if every element is known to
1623 /// be a power of two when defined. Supports values with integer or pointer
1624 /// types and vectors of integers.
1625 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1627 if (const Constant *C = dyn_cast<Constant>(V)) {
1628     if (C->isNullValue())
1629       return OrZero;
1631 const APInt *ConstIntOrConstSplatInt;
1632 if (match(C, m_APInt(ConstIntOrConstSplatInt)))
1633 return ConstIntOrConstSplatInt->isPowerOf2();
1636 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1637 // it is shifted off the end then the result is undefined.
1638   if (match(V, m_Shl(m_One(), m_Value())))
1639     return true;
1641 // (signbit) >>l X is clearly a power of two if the one is not shifted off the
1642 // bottom. If it is shifted off the bottom then the result is undefined.
1643   if (match(V, m_LShr(m_SignBit(), m_Value())))
1644     return true;
1646 // The remaining tests are all recursive, so bail out if we hit the limit.
1647   if (Depth++ == MaxDepth)
1648     return false;
1650 Value *X = nullptr, *Y = nullptr;
1651   // A shift left or a logical shift right of a power of two is a power of two
1652   // or zero.
1653 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1654 match(V, m_LShr(m_Value(X), m_Value()))))
1655 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1657 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1658 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1660 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1661 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1662 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1664 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1665 // A power of two and'd with anything is a power of two or zero.
1666 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1667         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1668       return true;
1669 // X & (-X) is always a power of two or zero.
1670     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1671       return true;
1672   }
1675 // Adding a power-of-two or zero to the same power-of-two or zero yields
1676 // either the original power-of-two, a larger power-of-two or zero.
1677 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1678 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1679 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1680 if (match(X, m_And(m_Specific(Y), m_Value())) ||
1681 match(X, m_And(m_Value(), m_Specific(Y))))
1682         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1683           return true;
1684 if (match(Y, m_And(m_Specific(X), m_Value())) ||
1685 match(Y, m_And(m_Value(), m_Specific(X))))
1686         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1687           return true;
1689 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1690 APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
1691 computeKnownBits(X, LHSZeroBits, LHSOneBits, Depth, Q);
1693 APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
1694 computeKnownBits(Y, RHSZeroBits, RHSOneBits, Depth, Q);
1695 // If i8 V is a power of two or zero:
1696 // ZeroBits: 1 1 1 0 1 1 1 1
1697 // ~ZeroBits: 0 0 0 1 0 0 0 0
1698 if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
1699 // If OrZero isn't set, we cannot give back a zero result.
1700 // Make sure either the LHS or RHS has a bit set.
1701         if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
1702           return true;
1706 // An exact divide or right shift can only shift off zero bits, so the result
1707 // is a power of two only if the first operand is a power of two and not
1708 // copying a sign bit (sdiv int_min, 2).
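  // Editor's example (illustrative): udiv exact i8 16, 4 yields 4, still a
  // power of two, whereas sdiv i8 -128, 2 yields -64 (0b11000000) by copying
  // the sign bit, which is why only lshr/udiv are matched here.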
1709 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1710 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1711     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1712                                   Depth, Q);
1713   }
1715   return false;
1716 }
1718 /// \brief Test whether a GEP's result is known to be non-null.
1720 /// Uses properties inherent in a GEP to try to determine whether it is known
1723 /// Currently this routine does not support vector GEPs.
1724 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1726   if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1727     return false;
1729 // FIXME: Support vector-GEPs.
1730 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1732 // If the base pointer is non-null, we cannot walk to a null address with an
1733 // inbounds GEP in address space zero.
1734   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1735     return true;
1737 // Walk the GEP operands and see if any operand introduces a non-zero offset.
1738 // If so, then the GEP cannot produce a null pointer, as doing so would
1739 // inherently violate the inbounds contract within address space zero.
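  // Editor's example (illustrative): for
  //   %p = getelementptr inbounds {i32, i32}, {i32, i32}* %b, i64 0, i32 1
  // the struct index contributes a constant 4-byte offset, so %p cannot be
  // null in address space zero even when nothing is known about %b.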
1740 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1741 GTI != GTE; ++GTI) {
1742 // Struct types are easy -- they must always be indexed by a constant.
1743 if (StructType *STy = GTI.getStructTypeOrNull()) {
1744 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1745 unsigned ElementIdx = OpC->getZExtValue();
1746 const StructLayout *SL = Q.DL.getStructLayout(STy);
1747 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1748       if (ElementOffset > 0)
1749         return true;
1750       continue;
1751     }
1753 // If we have a zero-sized type, the index doesn't matter. Keep looping.
1754     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1755       continue;
1757 // Fast path the constant operand case both for efficiency and so we don't
1758 // increment Depth when just zipping down an all-constant GEP.
1759     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1760       if (!OpC->isZero())
1761         return true;
1762       continue;
1763     }
1765 // We post-increment Depth here because while isKnownNonZero increments it
1766 // as well, when we pop back up that increment won't persist. We don't want
1767 // to recurse 10k times just because we have 10k GEP operands. We don't
1768     // bail completely out because we want to handle constant GEPs regardless
1769     // of depth.
1770     if (Depth++ >= MaxDepth)
1771       continue;
1773     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1774       return true;
1775   }
1777   return false;
1778 }
1780 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1781 /// ensure that the value it's attached to is never Value? 'RangeType' is
1782 /// the type of the value described by the range.
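// Editor's example (illustrative): metadata !{i32 1, i32 256} describes the
// half-open range [1, 256), which does not contain 0, so this routine would
// report that the annotated value is never 0.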
1783 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1784 const unsigned NumRanges = Ranges->getNumOperands() / 2;
1785 assert(NumRanges >= 1);
1786 for (unsigned i = 0; i < NumRanges; ++i) {
1787 ConstantInt *Lower =
1788 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1789 ConstantInt *Upper =
1790 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1791 ConstantRange Range(Lower->getValue(), Upper->getValue());
1792     if (Range.contains(Value))
1793       return false;
1794   }
1795   return true;
1796 }
1798 /// Return true if the given value is known to be non-zero when defined. For
1799 /// vectors, return true if every element is known to be non-zero when
1800 /// defined. For pointers, if the context instruction and dominator tree are
1801 /// specified, perform context-sensitive analysis and return true if the
1802 /// pointer couldn't possibly be null at the specified instruction.
1803 /// Supports values with integer or pointer type and vectors of integers.
1804 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1805 if (auto *C = dyn_cast<Constant>(V)) {
1806     if (C->isNullValue())
1807       return false;
1808 if (isa<ConstantInt>(C))
1809       // Must be non-zero due to null test above.
1810       return true;
1812 // For constant vectors, check that all elements are undefined or known
1813 // non-zero to determine that the whole vector is known non-zero.
1814     if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1815       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1816         Constant *Elt = C->getAggregateElement(i);
1817         if (!Elt || Elt->isNullValue())
1818           return false;
1819         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1820           return false;
1821       }
1822       return true;
1823     }
1825     return false;
1826   }
1828 if (auto *I = dyn_cast<Instruction>(V)) {
1829 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1830 // If the possible ranges don't contain zero, then the value is
1831 // definitely non-zero.
1832 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1833 const APInt ZeroValue(Ty->getBitWidth(), 0);
1834 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1840 // The remaining tests are all recursive, so bail out if we hit the limit.
1841   if (Depth++ >= MaxDepth)
1842     return false;
1844 // Check for pointer simplifications.
1845 if (V->getType()->isPointerTy()) {
1846     if (isKnownNonNullAt(V, Q.CxtI, Q.DT))
1847       return true;
1848 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1849       if (isGEPKnownNonNull(GEP, Depth, Q))
1850         return true;
1851   }
1853 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1855 // X | Y != 0 if X != 0 or Y != 0.
1856 Value *X = nullptr, *Y = nullptr;
1857 if (match(V, m_Or(m_Value(X), m_Value(Y))))
1858 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1860 // ext X != 0 if X != 0.
1861 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1862 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1864 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
1865 // if the lowest bit is shifted off the end.
1866 if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1867 // shl nuw can't remove any non-zero bits.
1868 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1869 if (BO->hasNoUnsignedWrap())
1870 return isKnownNonZero(X, Depth, Q);
1872 APInt KnownZero(BitWidth, 0);
1873 APInt KnownOne(BitWidth, 0);
1874     computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1875     if (KnownOne[0])
1876       return true;
1877   }
1878 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
1879 // defined if the sign bit is shifted off the end.
1880 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1881 // shr exact can only shift out zero bits.
1882 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1883     if (BO->isExact())
1884       return isKnownNonZero(X, Depth, Q);
1886 bool XKnownNonNegative, XKnownNegative;
1887     ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1888     if (XKnownNegative)
1889       return true;
1891 // If the shifter operand is a constant, and all of the bits shifted
1892 // out are known to be zero, and X is known non-zero then at least one
1893 // non-zero bit must remain.
1894 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1895 APInt KnownZero(BitWidth, 0);
1896 APInt KnownOne(BitWidth, 0);
1897 computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1899 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1900 // Is there a known one in the portion not shifted out?
1901       if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal)
1902         return true;
1903 // Are all the bits to be shifted out known zero?
1904 if (KnownZero.countTrailingOnes() >= ShiftVal)
1905 return isKnownNonZero(X, Depth, Q);
1908 // div exact can only produce a zero if the dividend is zero.
1909 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1910 return isKnownNonZero(X, Depth, Q);
1913 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1914 bool XKnownNonNegative, XKnownNegative;
1915 bool YKnownNonNegative, YKnownNegative;
1916 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1917 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Depth, Q);
1919 // If X and Y are both non-negative (as signed values) then their sum is not
1920 // zero unless both X and Y are zero.
1921 if (XKnownNonNegative && YKnownNonNegative)
1922       if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
1923         return true;
1925 // If X and Y are both negative (as signed values) then their sum is not
1926 // zero unless both X and Y equal INT_MIN.
1927 if (BitWidth && XKnownNegative && YKnownNegative) {
1928 APInt KnownZero(BitWidth, 0);
1929 APInt KnownOne(BitWidth, 0);
1930 APInt Mask = APInt::getSignedMaxValue(BitWidth);
1931       // The sign bit of X is set. If some other bit is set then X is not equal
1932       // to INT_MIN.
1933 computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1934       if ((KnownOne & Mask) != 0)
1935         return true;
1936       // The sign bit of Y is set. If some other bit is set then Y is not equal
1937       // to INT_MIN.
1938 computeKnownBits(Y, KnownZero, KnownOne, Depth, Q);
1939       if ((KnownOne & Mask) != 0)
1940         return true;
1941     }
1943 // The sum of a non-negative number and a power of two is not zero.
1944 if (XKnownNonNegative &&
1945         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
1946       return true;
1947 if (YKnownNonNegative &&
1948         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
1949       return true;
1950   }
1952 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
1953 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1954 // If X and Y are non-zero then so is X * Y as long as the multiplication
1955 // does not overflow.
1956 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
1957         isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
1958       return true;
1959   }
1960 // (C ? X : Y) != 0 if X != 0 and Y != 0.
1961 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
1962 if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
1963         isKnownNonZero(SI->getFalseValue(), Depth, Q))
1964       return true;
1965   }
1967 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
1968 // Try and detect a recurrence that monotonically increases from a
1969 // starting value, as these are common as induction variables.
1970 if (PN->getNumIncomingValues() == 2) {
1971 Value *Start = PN->getIncomingValue(0);
1972 Value *Induction = PN->getIncomingValue(1);
1973 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
1974 std::swap(Start, Induction);
1975 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
1976 if (!C->isZero() && !C->isNegative()) {
1977         ConstantInt *X;
1978         if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
1979              match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
1980             !X->isNegative())
1981           return true;
1985 // Check if all incoming values are non-zero constant.
1986 bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) {
1987 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZeroValue();
1989     if (AllNonZeroConstants)
1990       return true;
1993 if (!BitWidth) return false;
1994 APInt KnownZero(BitWidth, 0);
1995 APInt KnownOne(BitWidth, 0);
1996 computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
1997 return KnownOne != 0;
2000 /// Return true if V2 == V1 + X, where X is known non-zero.
2001 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2002 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2003 if (!BO || BO->getOpcode() != Instruction::Add)
2005 Value *Op = nullptr;
2006 if (V2 == BO->getOperand(0))
2007 Op = BO->getOperand(1);
2008 else if (V2 == BO->getOperand(1))
2009     Op = BO->getOperand(0);
2010   else
2011     return false;
2012 return isKnownNonZero(Op, 0, Q);
2015 /// Return true if it is known that V1 != V2.
2016 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2017   if (V1->getType()->isVectorTy() || V1 == V2)
2018     return false;
2019 if (V1->getType() != V2->getType())
2020     // We can't look through casts yet.
2021     return false;
2022   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2023     return true;
2025 if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) {
2026 // Are any known bits in V1 contradictory to known bits in V2? If V1
2027 // has a known zero where V2 has a known one, they must not be equal.
2028 auto BitWidth = Ty->getBitWidth();
2029 APInt KnownZero1(BitWidth, 0);
2030 APInt KnownOne1(BitWidth, 0);
2031 computeKnownBits(V1, KnownZero1, KnownOne1, 0, Q);
2032 APInt KnownZero2(BitWidth, 0);
2033 APInt KnownOne2(BitWidth, 0);
2034 computeKnownBits(V2, KnownZero2, KnownOne2, 0, Q);
2036 auto OppositeBits = (KnownZero1 & KnownOne2) | (KnownZero2 & KnownOne1);
2037     if (OppositeBits.getBoolValue())
2038       return true;
2039   }
2041   return false;
2042 }
2043 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2044 /// simplify operations downstream. Mask is known to be zero for bits that V
2045 /// cannot have.
2047 /// This function is defined on values with integer type, values with pointer
2048 /// type, and vectors of integers. In the case
2049 /// where V is a vector, the mask, known zero, and known one values are the
2050 /// same width as the vector element, and the bit is set only if it is true
2051 /// for all of the elements in the vector.
2052 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2054 APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
2055 computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
2056 return (KnownZero & Mask) == Mask;
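// Editor's example (illustrative): for %v = shl i8 %x, 4 the low four bits
// are known zero, so MaskedValueIsZero(%v, 0x0F) returns true.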
2059 /// For vector constants, loop over the elements and find the constant with the
2060 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2061 /// or if any element was not analyzed; otherwise, return the count for the
2062 /// element with the minimum number of sign bits.
2063 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2065 const auto *CV = dyn_cast<Constant>(V);
2066   if (!CV || !CV->getType()->isVectorTy())
2067     return 0;
2069 unsigned MinSignBits = TyBits;
2070 unsigned NumElts = CV->getType()->getVectorNumElements();
2071 for (unsigned i = 0; i != NumElts; ++i) {
2072 // If we find a non-ConstantInt, bail out.
2073     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2074     if (!Elt)
2075       return 0;
2077 // If the sign bit is 1, flip the bits, so we always count leading zeros.
2078 APInt EltVal = Elt->getValue();
2079     if (EltVal.isNegative())
2080       EltVal = ~EltVal;
2081     MinSignBits = std::min(MinSignBits, EltVal.countLeadingZeros());
2082   }
2083   return MinSignBits;
2084 }
2087 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2090 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2092 unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2093   assert(Result > 0 && "At least one sign bit needs to be present!");
2094   return Result;
2095 }
2097 /// Return the number of times the sign bit of the register is replicated into
2098 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2099 /// (itself), but other cases can give us information. For example, immediately
2100 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2101 /// other, so we return 3. For vectors, return the number of sign bits for the
2102 /// vector element with the minimum number of known sign bits.
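// Editor's example (illustrative): with %x : i8 completely unknown,
// %y = ashr i8 %x, 2 has its top three bits equal to the original sign bit,
// so this returns at least 3 for %y.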
2103 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2106 // We return the minimum number of sign bits that are guaranteed to be present
2107 // in V, so for undef we have to conservatively return 1. We don't have the
2108 // same behavior for poison though -- that's a FIXME today.
2110 unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
2112 unsigned FirstAnswer = 1;
2114 // Note that ConstantInt is handled by the general computeKnownBits case
2117 if (Depth == MaxDepth)
2118 return 1; // Limit search depth.
2120 const Operator *U = dyn_cast<Operator>(V);
2121 switch (Operator::getOpcode(V)) {
2123 case Instruction::SExt:
2124 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2125 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2127 case Instruction::SDiv: {
2128 const APInt *Denominator;
2129 // sdiv X, C -> adds log(C) sign bits.
2130 if (match(U->getOperand(1), m_APInt(Denominator))) {
2132 // Ignore non-positive denominator.
2133       if (!Denominator->isStrictlyPositive())
2134         break;
2136 // Calculate the incoming numerator bits.
2137 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2139 // Add floor(log(C)) bits to the numerator bits.
2140 return std::min(TyBits, NumBits + Denominator->logBase2());
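      // Editor's example (illustrative): sdiv i16 %x, 16 adds
      // floor(log2(16)) = 4 sign bits, so a numerator with one known sign bit
      // gives a quotient with at least five.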
2145 case Instruction::SRem: {
2146 const APInt *Denominator;
2147 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2148     // positive constant. This lets us put a lower bound on the number of sign
2149     // bits.
2150 if (match(U->getOperand(1), m_APInt(Denominator))) {
2152 // Ignore non-positive denominator.
2153       if (!Denominator->isStrictlyPositive())
2154         break;
2156 // Calculate the incoming numerator bits. SRem by a positive constant
2157 // can't lower the number of sign bits.
2158       unsigned NumrBits =
2159           ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2161 // Calculate the leading sign bit constraints by examining the
2162 // denominator. Given that the denominator is positive, there are two
2165 // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2166 // (1 << ceilLogBase2(C)).
2168 // 2. the numerator is negative. Then the result range is (-C,0] and
2169 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2171 // Thus a lower bound on the number of sign bits is `TyBits -
2172 // ceilLogBase2(C)`.
2174 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2175 return std::max(NumrBits, ResBits);
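      // Editor's example (illustrative): srem i8 %x, 20 produces a value in
      // (-20, 20), guaranteeing at least 8 - ceilLogBase2(20) = 8 - 5 = 3 sign
      // bits regardless of the numerator.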
2180 case Instruction::AShr: {
2181 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2182 // ashr X, C -> adds C sign bits. Vectors too.
2183     const APInt *ShAmt;
2184     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2185 unsigned ShAmtLimited = ShAmt->getZExtValue();
2186 if (ShAmtLimited >= TyBits)
2187 break; // Bad shift.
2188 Tmp += ShAmtLimited;
2189       if (Tmp > TyBits) Tmp = TyBits;
2190     }
2191     return Tmp;
2192   }
2193 case Instruction::Shl: {
2194     const APInt *ShAmt;
2195     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2196 // shl destroys sign bits.
2197 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2198 Tmp2 = ShAmt->getZExtValue();
2199 if (Tmp2 >= TyBits || // Bad shift.
2200           Tmp2 >= Tmp) break;  // Shifted all sign bits out.
2201       return Tmp - Tmp2;
2202     }
2203     break;
2205 case Instruction::And:
2206 case Instruction::Or:
2207 case Instruction::Xor: // NOT is handled here.
2208 // Logical binary ops preserve the number of sign bits at the worst.
2209 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2210     if (Tmp != 1) {
2211       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2212 FirstAnswer = std::min(Tmp, Tmp2);
2213 // We computed what we know about the sign bits as our first
2214 // answer. Now proceed to the generic code that uses
2215       // computeKnownBits, and pick whichever answer is better.
2216     }
2217     break;
2219 case Instruction::Select:
2220 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2221 if (Tmp == 1) return 1; // Early out.
2222 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2223 return std::min(Tmp, Tmp2);
2225 case Instruction::Add:
2226 // Add can have at most one carry bit. Thus we know that the output
2227 // is, at worst, one more bit than the inputs.
2228 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2229 if (Tmp == 1) return 1; // Early out.
2231 // Special case decrementing a value (ADD X, -1):
2232 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2233 if (CRHS->isAllOnesValue()) {
2234 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2235 computeKnownBits(U->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
2237         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2238         // sign bits.
2239         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2240           return TyBits;
2242 // If we are subtracting one from a positive number, there is no carry
2243 // out of the result.
2244         if (KnownZero.isNegative())
2245           return Tmp;
2246       }
2248 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2249 if (Tmp2 == 1) return 1;
2250 return std::min(Tmp, Tmp2)-1;
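    // Editor's example (illustrative): adding two i16 values that each carry
    // at least 5 sign bits yields min(5, 5) - 1 = 4, since the carry can
    // consume one replicated bit.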
2252 case Instruction::Sub:
2253 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2254 if (Tmp2 == 1) return 1;
2257 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2258 if (CLHS->isNullValue()) {
2259 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2260 computeKnownBits(U->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
2261         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2262         // sign bits.
2263         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2264           return TyBits;
2266 // If the input is known to be positive (the sign bit is known clear),
2267 // the output of the NEG has the same number of sign bits as the input.
2268         if (KnownZero.isNegative())
2269           return Tmp2;
2270       }
2271 // Otherwise, we treat this like a SUB.
2274 // Sub can have at most one carry bit. Thus we know that the output
2275 // is, at worst, one more bit than the inputs.
2276 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2277 if (Tmp == 1) return 1; // Early out.
2278 return std::min(Tmp, Tmp2)-1;
2280 case Instruction::PHI: {
2281 const PHINode *PN = cast<PHINode>(U);
2282 unsigned NumIncomingValues = PN->getNumIncomingValues();
2283 // Don't analyze large in-degree PHIs.
2284 if (NumIncomingValues > 4) break;
2285 // Unreachable blocks may have zero-operand PHI nodes.
2286 if (NumIncomingValues == 0) break;
2288 // Take the minimum of all incoming values. This can't infinitely loop
2289 // because of our depth threshold.
2290 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2291 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2292 if (Tmp == 1) return Tmp;
2293       Tmp = std::min(
2294           Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2295     }
2296     return Tmp;
2297   }
2299 case Instruction::Trunc:
2300 // FIXME: it's tricky to do anything useful for this, but it is an important
2301 // case for targets like X86.
2304 case Instruction::ExtractElement:
2305 // Look through extract element. At the moment we keep this simple and skip
2306 // tracking the specific element. But at least we might find information
2307 // valid for all elements of the vector (for example if vector is sign
2308 // extended, shifted, etc).
2309 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2312 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2313 // use this information.
2315 // If we can examine all elements of a vector constant successfully, we're
2316 // done (we can't do any better than that). If not, keep trying.
2317   if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2318     return VecSignBits;
2320 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2321 computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
2323 // If we know that the sign bit is either zero or one, determine the number of
2324 // identical bits in the top of the input value.
2325 if (KnownZero.isNegative())
2326 return std::max(FirstAnswer, KnownZero.countLeadingOnes());
2328 if (KnownOne.isNegative())
2329 return std::max(FirstAnswer, KnownOne.countLeadingOnes());
2331 // computeKnownBits gave us no extra information about the top bits.
2335 /// This function computes the integer multiple of Base that equals V.
2336 /// If successful, it returns true and returns the multiple in
2337 /// Multiple. If unsuccessful, it returns false. It looks
2338 /// through SExt instructions only if LookThroughSExt is true.
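// Editor's usage sketch (illustrative): for V = 24 and Base = 8, this
// succeeds with Multiple = 3 because 24 == 8 * 3; for V = 10 and Base = 8
// it returns false.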
2339 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2340 bool LookThroughSExt, unsigned Depth) {
2341 const unsigned MaxDepth = 6;
2343 assert(V && "No Value?");
2344 assert(Depth <= MaxDepth && "Limit Search Depth");
2345   assert(V->getType()->isIntegerTy() && "Not integer type!");
2347 Type *T = V->getType();
2349 ConstantInt *CI = dyn_cast<ConstantInt>(V);
2359 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2360 Constant *BaseVal = ConstantInt::get(T, Base);
2361 if (CO && CO == BaseVal) {
2363     Multiple = ConstantInt::get(T, 1);
2364     return true;
2365   }
2367 if (CI && CI->getZExtValue() % Base == 0) {
2368     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2369     return true;
2370   }
2372 if (Depth == MaxDepth) return false; // Limit search depth.
2374 Operator *I = dyn_cast<Operator>(V);
2375 if (!I) return false;
2377 switch (I->getOpcode()) {
2379 case Instruction::SExt:
2380 if (!LookThroughSExt) return false;
2381 // otherwise fall through to ZExt
2382 case Instruction::ZExt:
2383 return ComputeMultiple(I->getOperand(0), Base, Multiple,
2384 LookThroughSExt, Depth+1);
2385 case Instruction::Shl:
2386 case Instruction::Mul: {
2387 Value *Op0 = I->getOperand(0);
2388 Value *Op1 = I->getOperand(1);
2390 if (I->getOpcode() == Instruction::Shl) {
2391 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2392 if (!Op1CI) return false;
2393 // Turn Op0 << Op1 into Op0 * 2^Op1
2394 APInt Op1Int = Op1CI->getValue();
2395 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2396 APInt API(Op1Int.getBitWidth(), 0);
2397 API.setBit(BitToSet);
2398 Op1 = ConstantInt::get(V->getContext(), API);
2401 Value *Mul0 = nullptr;
2402 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2403 if (Constant *Op1C = dyn_cast<Constant>(Op1))
2404 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2405 if (Op1C->getType()->getPrimitiveSizeInBits() <
2406 MulC->getType()->getPrimitiveSizeInBits())
2407 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2408 if (Op1C->getType()->getPrimitiveSizeInBits() >
2409 MulC->getType()->getPrimitiveSizeInBits())
2410 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2412 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2413           Multiple = ConstantExpr::getMul(MulC, Op1C);
2414           return true;
2415         }
2417 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2418 if (Mul0CI->getValue() == 1) {
2419           // V == Base * Op1, so return Op1
2420           Multiple = Op1;
2421           return true;
2422         }
2425 Value *Mul1 = nullptr;
2426 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2427 if (Constant *Op0C = dyn_cast<Constant>(Op0))
2428 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2429 if (Op0C->getType()->getPrimitiveSizeInBits() <
2430 MulC->getType()->getPrimitiveSizeInBits())
2431 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2432 if (Op0C->getType()->getPrimitiveSizeInBits() >
2433 MulC->getType()->getPrimitiveSizeInBits())
2434 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2436 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2437           Multiple = ConstantExpr::getMul(MulC, Op0C);
2438           return true;
2439         }
2441 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2442 if (Mul1CI->getValue() == 1) {
2443           // V == Base * Op0, so return Op0
2444           Multiple = Op0;
2445           return true;
2446         }
2451   // We could not determine if V is a multiple of Base.
2452   return false;
2453 }
2455 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2456 const TargetLibraryInfo *TLI) {
2457 const Function *F = ICS.getCalledFunction();
2458   if (!F)
2459     return Intrinsic::not_intrinsic;
2461 if (F->isIntrinsic())
2462 return F->getIntrinsicID();
2464   if (!TLI)
2465     return Intrinsic::not_intrinsic;
2467   LibFunc Func;
2468   // We're going to make assumptions about the semantics of the functions, so
2469   // check that the target knows the function is available in this environment
2470   // and that it does not have local linkage.
2471   if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2472 return Intrinsic::not_intrinsic;
2474 if (!ICS.onlyReadsMemory())
2475 return Intrinsic::not_intrinsic;
2477 // Otherwise check if we have a call to a function that can be turned into a
2478 // vector intrinsic.
2480   switch (Func) {
2481   default: break;
2482   case LibFunc_sin:
2483   case LibFunc_sinf:
2484   case LibFunc_sinl:
2485     return Intrinsic::sin;
2486   case LibFunc_cos:
2487   case LibFunc_cosf:
2488   case LibFunc_cosl:
2489     return Intrinsic::cos;
2490   case LibFunc_exp:
2491   case LibFunc_expf:
2492   case LibFunc_expl:
2493     return Intrinsic::exp;
2494   case LibFunc_exp2:
2495   case LibFunc_exp2f:
2496   case LibFunc_exp2l:
2497     return Intrinsic::exp2;
2498   case LibFunc_log:
2499   case LibFunc_logf:
2500   case LibFunc_logl:
2501     return Intrinsic::log;
2503 case LibFunc_log10f:
2504 case LibFunc_log10l:
2505 return Intrinsic::log10;
2506   case LibFunc_log2:
2507   case LibFunc_log2f:
2508   case LibFunc_log2l:
2509     return Intrinsic::log2;
2510   case LibFunc_fabs:
2511   case LibFunc_fabsf:
2512   case LibFunc_fabsl:
2513     return Intrinsic::fabs;
2514   case LibFunc_fmin:
2515   case LibFunc_fminf:
2516   case LibFunc_fminl:
2517     return Intrinsic::minnum;
2518   case LibFunc_fmax:
2519   case LibFunc_fmaxf:
2520   case LibFunc_fmaxl:
2521     return Intrinsic::maxnum;
2522 case LibFunc_copysign:
2523 case LibFunc_copysignf:
2524 case LibFunc_copysignl:
2525 return Intrinsic::copysign;
2527 case LibFunc_floorf:
2528 case LibFunc_floorl:
2529 return Intrinsic::floor;
2530   case LibFunc_ceil:
2531   case LibFunc_ceilf:
2532   case LibFunc_ceill:
2533     return Intrinsic::ceil;
2535 case LibFunc_truncf:
2536 case LibFunc_truncl:
2537 return Intrinsic::trunc;
2538   case LibFunc_rint:
2539   case LibFunc_rintf:
2540   case LibFunc_rintl:
2541     return Intrinsic::rint;
2542 case LibFunc_nearbyint:
2543 case LibFunc_nearbyintf:
2544 case LibFunc_nearbyintl:
2545 return Intrinsic::nearbyint;
2547 case LibFunc_roundf:
2548 case LibFunc_roundl:
2549 return Intrinsic::round;
2550   case LibFunc_pow:
2551   case LibFunc_powf:
2552   case LibFunc_powl:
2553     return Intrinsic::pow;
2554   case LibFunc_sqrt:
2555   case LibFunc_sqrtf:
2556   case LibFunc_sqrtl:
2557     if (ICS->hasNoNaNs())
2558 return Intrinsic::sqrt;
2559 return Intrinsic::not_intrinsic;
2560   }
2562   return Intrinsic::not_intrinsic;
2565 /// Return true if we can prove that the specified FP value is never equal to
2568 /// NOTE: this function will need to be revisited when we support non-default
2569 /// rounding modes!
2571 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2573 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2574 return !CFP->getValueAPF().isNegZero();
2576 if (Depth == MaxDepth)
2577 return false; // Limit search depth.
2579 const Operator *I = dyn_cast<Operator>(V);
2580 if (!I) return false;
2582 // Check if the nsz fast-math flag is set
2583 if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
2584     if (FPO->hasNoSignedZeros())
2585       return true;
2587 // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
2588 if (I->getOpcode() == Instruction::FAdd)
2589 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
2590       if (CFP->isNullValue())
2591         return true;
2593 // sitofp and uitofp turn into +0.0 for zero.
2594   if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
2595     return true;
2597 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
2598 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2599     switch (IID) {
2600     default:
2601       break;
2602     // sqrt(-0.0) = -0.0, no other negative results are possible.
2603 case Intrinsic::sqrt:
2604 return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1);
2606     case Intrinsic::fabs:
2607       return true;
2608     }
2609   }
2611   return false;
2612 }
2614 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2615 /// standard ordered compare; e.g. -0.0 is treated as olt 0.0 because of the
2616 /// sign bit, even though the two compare equal.
2617 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2618 const TargetLibraryInfo *TLI,
2621 // TODO: This function does not do the right thing when SignBitOnly is true
2622 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2623 // which flips the sign bits of NaNs. See
2624 // https://llvm.org/bugs/show_bug.cgi?id=31702.
2626 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2627 return !CFP->getValueAPF().isNegative() ||
2628 (!SignBitOnly && CFP->getValueAPF().isZero());
2631 if (Depth == MaxDepth)
2632 return false; // Limit search depth.
2634 const Operator *I = dyn_cast<Operator>(V);
2638 switch (I->getOpcode()) {
2641 // Unsigned integers are always nonnegative.
2642   case Instruction::UIToFP:
2643     return true;
2644 case Instruction::FMul:
2645 // x*x is always non-negative or a NaN.
2646 if (I->getOperand(0) == I->getOperand(1) &&
2647         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2648       return true;
2650     LLVM_FALLTHROUGH;
2651 case Instruction::FAdd:
2652 case Instruction::FDiv:
2653 case Instruction::FRem:
2654     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2655                                            Depth + 1) &&
2656            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2657                                            Depth + 1);
2658 case Instruction::Select:
2659     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2660                                            Depth + 1) &&
2661            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2662                                            Depth + 1);
2663 case Instruction::FPExt:
2664 case Instruction::FPTrunc:
2665 // Widening/narrowing never change sign.
2666     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2667                                            Depth + 1);
2668 case Instruction::Call:
2669 const auto *CI = cast<CallInst>(I);
2670 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2671     switch (IID) {
2672     default:
2673       break;
2674     case Intrinsic::maxnum:
2675       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2676                                              Depth + 1) ||
2677              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2678                                              Depth + 1);
2679 case Intrinsic::minnum:
2680       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2681                                              Depth + 1) &&
2682              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2683                                              Depth + 1);
2684 case Intrinsic::exp:
2685 case Intrinsic::exp2:
2686     case Intrinsic::fabs:
2687       return true;
2689 case Intrinsic::sqrt:
2690       // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
2691       if (!SignBitOnly)
2692         return true;
2693 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
2694 CannotBeNegativeZero(CI->getOperand(0), TLI));
2696 case Intrinsic::powi:
2697 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
2698 // powi(x,n) is non-negative if n is even.
2699         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
2700           return true;
2702 // TODO: This is not correct. Given that exp is an integer, here are the
2703 // ways that pow can return a negative value:
2705 // pow(x, exp) --> negative if exp is odd and x is negative.
2706 // pow(-0, exp) --> -inf if exp is negative odd.
2707 // pow(-0, exp) --> -0 if exp is positive odd.
2708 // pow(-inf, exp) --> -0 if exp is negative odd.
2709 // pow(-inf, exp) --> -inf if exp is positive odd.
2711 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
2712 // but we must return false if x == -0. Unfortunately we do not currently
2713 // have a way of expressing this constraint. See details in
2714 // https://llvm.org/bugs/show_bug.cgi?id=31702.
2715       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2716                                              Depth + 1);
2718 case Intrinsic::fma:
2719 case Intrinsic::fmuladd:
2720 // x*x+y is non-negative if y is non-negative.
2721 return I->getOperand(0) == I->getOperand(1) &&
2722 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
2723              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2724                                              Depth + 1);
2725     }
2726     break;
2727   }
2729   return false;
2730 }
2731 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2732 const TargetLibraryInfo *TLI) {
2733 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
2736 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
2737 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
2740 /// If the specified value can be set by repeating the same byte in memory,
2741 /// return the i8 value that it is represented with. This is
2742 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2743 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
2744 /// byte store (e.g. i16 0x1234), return null.
2745 Value *llvm::isBytewiseValue(Value *V) {
2746 // All byte-wide stores are splatable, even of arbitrary variables.
2747 if (V->getType()->isIntegerTy(8)) return V;
2749 // Handle 'null' ConstantArrayZero etc.
2750 if (Constant *C = dyn_cast<Constant>(V))
2751 if (C->isNullValue())
2752 return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2754 // Constant float and double values can be handled as integer values if the
2755 // corresponding integer value is "byteable". An important case is 0.0.
2756 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2757 if (CFP->getType()->isFloatTy())
2758 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2759 if (CFP->getType()->isDoubleTy())
2760 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2761     // Don't handle long double formats, which have strange constraints.
2762   }
2764 // We can handle constant integers that are multiple of 8 bits.
2765 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2766 if (CI->getBitWidth() % 8 == 0) {
2767 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2769       if (!CI->getValue().isSplat(8))
2770         return nullptr;
2771 return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
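      // Editor's example (illustrative): i32 0xAAAAAAAA passes isSplat(8) and
      // is truncated to i8 0xAA, while i32 0x00FF00FF fails because its bytes
      // differ (it is only a 16-bit splat).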
2775   // A ConstantDataArray/Vector is splatable if all its members are equal and
2776   // also splatable.
2777 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2778 Value *Elt = CA->getElementAsConstant(0);
2779     Value *Val = isBytewiseValue(Elt);
2780     if (!Val)
2781       return nullptr;
2783 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2784       if (CA->getElementAsConstant(I) != Elt)
2785         return nullptr;
2787     return Val;
2788   }
2790 // Conceptually, we could handle things like:
2791 // %a = zext i8 %X to i16
2792 // %b = shl i16 %a, 8
2793 // %c = or i16 %a, %b
2794 // but until there is an example that actually needs this, it doesn't seem
2795   // worth worrying about.
2796   return nullptr;
2797 }
2800 // This is the recursive version of BuildSubAggregate. It takes a few different
2801 // arguments. Idxs is the index within the nested struct From that we are
2802 // looking at now (which is of type IndexedType). IdxSkip is the number of
2803 // indices from Idxs that should be left out when inserting into the resulting
2804 // struct. To is the result struct built so far, new insertvalue instructions
2805 // build on that.
2806 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2807 SmallVectorImpl<unsigned> &Idxs,
2809 Instruction *InsertBefore) {
2810   llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
2811   if (STy) {
2812     // Save the original To argument so we can modify it
2813     Value *OrigTo = To;
2814 // General case, the type indexed by Idxs is a struct
2815 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2816       // Process each struct element recursively
2817       Idxs.push_back(i);
2818       Value *PrevTo = To;
2819       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2820                              InsertBefore);
2821       Idxs.pop_back();
2822       if (!To) {
2823 // Couldn't find any inserted value for this index? Cleanup
2824 while (PrevTo != OrigTo) {
2825 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2826 PrevTo = Del->getAggregateOperand();
2827 Del->eraseFromParent();
2828         }
2829         // Stop processing elements
2830         break;
2831       }
2832     }
2833     // If we successfully found a value for each of our subaggregates
2834     if (To)
2835       return To;
2836   }
2837 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
2838 // the struct's elements had a value that was inserted directly. In the latter
2839 // case, perhaps we can't determine each of the subelements individually, but
2840 // we might be able to find the complete struct somewhere.
2842 // Find the value that is at that particular spot
2843   Value *V = FindInsertedValue(From, Idxs);
2845   if (!V)
2846     return nullptr;
2848   // Insert the value in the new (sub) aggregate
2849 return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2850 "tmp", InsertBefore);
2853 // This helper takes a nested struct and extracts a part of it (which is again a
2854 // struct) into a new value. For example, given the struct:
2855 // { a, { b, { c, d }, e } }
2856 // and the indices "1, 1" this returns
2857 // { c, d }.
2859 // It does this by inserting an insertvalue for each element in the resulting
2860 // struct, as opposed to just inserting a single struct. This will only work if
2861 // each of the elements of the substruct are known (ie, inserted into From by an
2862 // insertvalue instruction somewhere).
2864 // All inserted insertvalue instructions are inserted before InsertBefore
2865 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2866 Instruction *InsertBefore) {
2867 assert(InsertBefore && "Must have someplace to insert!");
2868   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2869                                                        idx_range);
2870 Value *To = UndefValue::get(IndexedType);
2871 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2872 unsigned IdxSkip = Idxs.size();
2874 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2877 /// Given an aggregate and a sequence of indices, see if
2878 /// the scalar value indexed is already around as a register, for example if it
2879 /// were inserted directly into the aggregate.
2881 /// If InsertBefore is not null, this function will duplicate (modified)
2882 /// insertvalues when a part of a nested struct is extracted.
2883 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2884 Instruction *InsertBefore) {
2885 // Nothing to index? Just return V then (this is useful at the end of our
2886   // recursion).
2887   if (idx_range.empty())
2888     return V;
2889 // We have indices, so V should have an indexable type.
2890 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
2891 "Not looking at a struct or array?");
2892 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
2893 "Invalid indices for type?");
2895 if (Constant *C = dyn_cast<Constant>(V)) {
2896 C = C->getAggregateElement(idx_range[0]);
2897 if (!C) return nullptr;
2898 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
2901 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
2902 // Loop the indices for the insertvalue instruction in parallel with the
2903 // requested indices
2904 const unsigned *req_idx = idx_range.begin();
2905 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
2906 i != e; ++i, ++req_idx) {
2907 if (req_idx == idx_range.end()) {
2908         // We can't handle this without inserting insertvalues
2909         if (!InsertBefore)
2910           return nullptr;
2912 // The requested index identifies a part of a nested aggregate. Handle
2913 // this specially. For example,
2914 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
2915 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
2916 // %C = extractvalue {i32, { i32, i32 } } %B, 1
2917 // This can be changed into
2918 // %A = insertvalue {i32, i32 } undef, i32 10, 0
2919 // %C = insertvalue {i32, i32 } %A, i32 11, 1
2920         // which allows the unused 0,0 element from the nested struct to be
2921         // removed.
2922         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
2923                                  InsertBefore);
2926       // This insert value inserts something other than what we are looking for.
2927 // See if the (aggregate) value inserted into has the value we are
2928 // looking for, then.
2929       if (*req_idx != *i)
2930         return FindInsertedValue(I->getAggregateOperand(), idx_range,
2931                                  InsertBefore);
2932     }
2933 // If we end up here, the indices of the insertvalue match with those
2934 // requested (though possibly only partially). Now we recursively look at
2935 // the inserted value, passing any remaining indices.
2936 return FindInsertedValue(I->getInsertedValueOperand(),
2937                              makeArrayRef(req_idx, idx_range.end()),
2938                              InsertBefore);
2939   }
2941 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
2942 // If we're extracting a value from an aggregate that was extracted from
2943 // something else, we can extract from that something else directly instead.
2944 // However, we will need to chain I's indices with the requested indices.
2946 // Calculate the number of indices required
2947 unsigned size = I->getNumIndices() + idx_range.size();
2948 // Allocate some space to put the new indices in
2949     SmallVector<unsigned, 5> Idxs;
2950     Idxs.reserve(size);
2951 // Add indices from the extract value instruction
2952 Idxs.append(I->idx_begin(), I->idx_end());
2954 // Add requested indices
2955 Idxs.append(idx_range.begin(), idx_range.end());
2957     assert(Idxs.size() == size && "Number of indices added not correct?");
2960 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
2962   // Otherwise, we don't know (such as extracting from a function return
2963   // value or load instruction).
2964   return nullptr;
2965 }
2967 /// Analyze the specified pointer to see if it can be expressed as a base
2968 /// pointer plus a constant offset. Return the base and offset to the caller.
2969 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
2970 const DataLayout &DL) {
2971 unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
2972 APInt ByteOffset(BitWidth, 0);
2974 // We walk up the defs but use a visited set to handle unreachable code. In
2975   // that case, we stop after accumulating the cycle once (not that it
2976   // matters).
2977 SmallPtrSet<Value *, 16> Visited;
2978 while (Visited.insert(Ptr).second) {
2979     if (Ptr->getType()->isVectorTy())
2980       break;
2982 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
2983 // If one of the values we have visited is an addrspacecast, then
2984 // the pointer type of this GEP may be different from the type
2985 // of the Ptr parameter which was passed to this function. This
2986 // means when we construct GEPOffset, we need to use the size
2987       // of GEP's pointer type rather than the size of the original
2988       // Ptr parameter.
2989 APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
2990       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
2991         break;
2993 ByteOffset += GEPOffset.getSExtValue();
2995 Ptr = GEP->getPointerOperand();
2996 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
2997 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
2998 Ptr = cast<Operator>(Ptr)->getOperand(0);
2999 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3000       if (GA->isInterposable())
3001         break;
3002       Ptr = GA->getAliasee();
3003     } else {
3004       break;
3005     }
3006   }
3007   Offset = ByteOffset.getSExtValue();
3008   return Ptr;
3009 }
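// Editor's usage sketch (illustrative): for
//   %p = getelementptr inbounds i8, i8* %q, i64 12
// GetPointerBaseWithConstantOffset returns %q and sets Offset = 12.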
3011 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP) {
3012 // Make sure the GEP has exactly three arguments.
3013   if (GEP->getNumOperands() != 3)
3014     return false;
3016 // Make sure the index-ee is a pointer to array of i8.
3017 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3018   if (!AT || !AT->getElementType()->isIntegerTy(8))
3019     return false;
3021 // Check to make sure that the first operand of the GEP is an integer and
3022 // has value 0 so that we are sure we're indexing into the initializer.
3023 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3024   if (!FirstIdx || !FirstIdx->isZero())
3025     return false;
3027   return true;
3028 }
3030 /// This function computes the length of a null-terminated C string pointed to
3031 /// by V. If successful, it returns true and returns the string in Str.
3032 /// If unsuccessful, it returns false.
3033 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3034 uint64_t Offset, bool TrimAtNul) {
3037 // Look through bitcast instructions and geps.
3038 V = V->stripPointerCasts();
3040   // If the value is a GEP instruction or constant expression, treat it as an
3041   // offset.
3042 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3043 // The GEP operator should be based on a pointer to string constant, and is
3044 // indexing into the string constant.
3045     if (!isGEPBasedOnPointerToString(GEP))
3046       return false;
3048 // If the second index isn't a ConstantInt, then this is a variable index
3049     // into the array. If this occurs, we can't say anything meaningful about
3050     // the string.
3051 uint64_t StartIdx = 0;
3052 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3053       StartIdx = CI->getZExtValue();
3054     else
3055       return false;
3056     return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
3057                                  TrimAtNul);
3058   }
3060 // The GEP instruction, constant or instruction, must reference a global
3061 // variable that is a constant and is initialized. The referenced constant
3062 // initializer is the array that we'll use for optimization.
3063 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3064   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3065     return false;
3067 // Handle the all-zeros case.
3068 if (GV->getInitializer()->isNullValue()) {
3069 // This is a degenerate case. The initializer is constant zero so the
3070     // length of the string must be zero.
3071     Str = "";
3072     return true;
3073   }
3075 // This must be a ConstantDataArray.
3076 const auto *Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3077   if (!Array || !Array->isString())
3078     return false;
3080 // Get the number of elements in the array.
3081 uint64_t NumElts = Array->getType()->getArrayNumElements();
3083 // Start out with the entire array in the StringRef.
3084 Str = Array->getAsString();
3086   if (Offset > NumElts)
3087     return false;
3089 // Skip over 'offset' bytes.
3090 Str = Str.substr(Offset);
3092   if (TrimAtNul) {
3093     // Trim off the \0 and anything after it. If the array is not nul
3094     // terminated, we just return the whole end of string. The client may know
3095     // some other way that the string is length-bound.
3096     Str = Str.substr(0, Str.find('\0'));
3097   }
3098   return true;
3099 }
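// Editor's example (illustrative): for a global initialized to c"hi\00zz\00",
// Str becomes "hi" when TrimAtNul is true, and the full six bytes (including
// the embedded nul) when it is false.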
3101 // These next two are very similar to the above, but also look through PHI
3102 // nodes.
3103 // TODO: See if we can integrate these two together.
3105 /// If we can compute the length of the string pointed to by
3106 /// the specified pointer, return 'len+1'. If we can't, return 0.
3107 static uint64_t GetStringLengthH(const Value *V,
3108 SmallPtrSetImpl<const PHINode*> &PHIs) {
3109 // Look through noop bitcast instructions.
3110 V = V->stripPointerCasts();
3112   // If this is a PHI node, there are two cases: either we have already seen it
3113   // or we haven't.
3114 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3115 if (!PHIs.insert(PN).second)
3116 return ~0ULL; // already in the set.
3118 // If it was new, see if all the input strings are the same length.
3119 uint64_t LenSoFar = ~0ULL;
3120 for (Value *IncValue : PN->incoming_values()) {
3121 uint64_t Len = GetStringLengthH(IncValue, PHIs);
3122 if (Len == 0) return 0; // Unknown length -> unknown.
3124 if (Len == ~0ULL) continue;
3126 if (Len != LenSoFar && LenSoFar != ~0ULL)
3127       return 0; // Disagree -> unknown.
3128     LenSoFar = Len;
3129   }
3131   // Success, all agree.
3132   return LenSoFar;
3133 }
3135 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3136 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3137 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
3138 if (Len1 == 0) return 0;
3139 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
3140 if (Len2 == 0) return 0;
3141 if (Len1 == ~0ULL) return Len2;
3142 if (Len2 == ~0ULL) return Len1;
3143     if (Len1 != Len2) return 0;
3144     return Len1;
3145   }
3147 // Otherwise, see if we can read the string.
3148   StringRef StrData;
3149   if (!getConstantStringInfo(V, StrData))
3150     return 0;
3152 return StrData.size()+1;
3155 /// If we can compute the length of the string pointed to by
3156 /// the specified pointer, return 'len+1'. If we can't, return 0.
3157 uint64_t llvm::GetStringLength(const Value *V) {
3158 if (!V->getType()->isPointerTy()) return 0;
3160 SmallPtrSet<const PHINode*, 32> PHIs;
3161 uint64_t Len = GetStringLengthH(V, PHIs);
3162 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3163 // an empty string as a length.
3164   return Len == ~0ULL ? 1 : Len;
3165 }
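// Editor's example (illustrative): for a pointer to the constant "abc\00",
// GetStringLength returns 4 (three characters plus the nul terminator).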
3167 /// \brief \p PN defines a loop-variant pointer to an object. Check if the
3168 /// previous iteration of the loop was referring to the same object as \p PN.
3169 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3170 const LoopInfo *LI) {
3171 // Find the loop-defined value.
3172 Loop *L = LI->getLoopFor(PN->getParent());
3173   if (PN->getNumIncomingValues() != 2)
3174     return true;
3176 // Find the value from previous iteration.
3177 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3178 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3179 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3180   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3181     return true;
3183 // If a new pointer is loaded in the loop, the pointer references a different
3184 // object in every iteration. E.g.:
3188 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3189     if (!L->isLoopInvariant(Load->getPointerOperand()))
3190       return false;
3192   return true;
3193 }
3194 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3195 unsigned MaxLookup) {
3196   if (!V->getType()->isPointerTy())
3197     return V;
3198 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3199 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3200 V = GEP->getPointerOperand();
3201 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3202 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3203 V = cast<Operator>(V)->getOperand(0);
3204 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3205 if (GA->isInterposable())
3206         return V;
3207       V = GA->getAliasee();
3208 } else if (isa<AllocaInst>(V)) {
3209       // An alloca can't be further simplified.
3210       return V;
3211     } else {
3212 if (auto CS = CallSite(V))
3213         if (Value *RV = CS.getReturnedArgOperand()) {
3214           V = RV;
3215           continue;
3216         }
3218 // See if InstructionSimplify knows any relevant tricks.
3219 if (Instruction *I = dyn_cast<Instruction>(V))
3220 // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3221         if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
3222           V = Simplified;
3223           continue;
3224         }
3226       return V;
3227     }
3228     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3229   }
3230   return V;
3231 }
void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
                                const DataLayout &DL, LoopInfo *LI,
                                unsigned MaxLookup) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObject(P, DL, MaxLookup);

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      // If this PHI changes the underlying object in every iteration of the
      // loop, don't look through it. Consider:
      //   int **A;
      //   for (i) {
      //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
      //     Curr = A[i];
      //     *Prev, *Curr;
      //   }
      //
      // Prev is tracking Curr one iteration behind so they refer to different
      // underlying objects.
      if (!LI || !LI->isLoopHeader(PN->getParent()) ||
          isSameUnderlyingObjectInLoop(PN, LI))
        for (Value *IncValue : PN->incoming_values())
          Worklist.push_back(IncValue);
      continue;
    }

    Objects.push_back(P);
  } while (!Worklist.empty());
}

/// Return true if the only users of this pointer are lifetime markers.
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
  for (const User *U : V->users()) {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II) return false;

    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
        II->getIntrinsicID() != Intrinsic::lifetime_end)
      return false;
  }
  return true;
}

bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                        const Instruction *CtxI,
                                        const DominatorTree *DT) {
  const Operator *Inst = dyn_cast<Operator>(V);
  if (!Inst)
    return false;

  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
      if (C->canTrap())
        return false;

  switch (Inst->getOpcode()) {
  default:
    return true;
  case Instruction::UDiv:
  case Instruction::URem: {
    // x / y is undefined if y == 0.
    const APInt *Denominator;
    if (match(Inst->getOperand(1), m_APInt(Denominator)))
      return *Denominator != 0;
    return false;
  }
  case Instruction::SDiv:
  case Instruction::SRem: {
    // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
    const APInt *Numerator, *Denominator;
    if (!match(Inst->getOperand(1), m_APInt(Denominator)))
      return false;
    // We cannot hoist this division if the denominator is 0.
    if (*Denominator == 0)
      return false;
    // It's safe to hoist if the denominator is not 0 or -1.
    if (*Denominator != -1)
      return true;
    // At this point we know that the denominator is -1. It is safe to hoist as
    // long as we know that the numerator is not INT_MIN.
    if (match(Inst->getOperand(0), m_APInt(Numerator)))
      return !Numerator->isMinSignedValue();
    // The numerator *might* be MinSignedValue.
    return false;
  }
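  // Worked example (illustrative, not from the original source): speculating
  // "sdiv i32 %x, -1" ahead of its guard is rejected unless %x is a constant
  // other than INT_MIN, because INT_MIN sdiv -1 overflows and is undefined;
  // a constant denominator such as 7 (neither 0 nor -1) is always safe.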
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(Inst);
    if (!LI->isUnordered() ||
        // Speculative load may create a race that did not exist in the source.
        LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
        // Speculative load may load data from dirty regions.
        LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress))
      return false;
    const DataLayout &DL = LI->getModule()->getDataLayout();
    return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
                                              LI->getAlignment(), DL, CtxI, DT);
  }
  case Instruction::Call: {
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      switch (II->getIntrinsicID()) {
      // These synthetic intrinsics have no side-effects and just mark
      // information about their operands.
      // FIXME: There are other no-op synthetic instructions that potentially
      // should be considered at least *safe* to speculate...
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
        return true;

      case Intrinsic::bitreverse:
      case Intrinsic::bswap:
      case Intrinsic::ctlz:
      case Intrinsic::ctpop:
      case Intrinsic::cttz:
      case Intrinsic::objectsize:
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::smul_with_overflow:
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::umul_with_overflow:
      case Intrinsic::usub_with_overflow:
        return true;
      // These intrinsics are defined to have the same behavior as libm
      // functions except for setting errno.
      case Intrinsic::sqrt:
      case Intrinsic::fma:
      case Intrinsic::fmuladd:
        return true;
      // These intrinsics are defined to have the same behavior as libm
      // functions, and the corresponding libm functions never set errno.
      case Intrinsic::trunc:
      case Intrinsic::copysign:
      case Intrinsic::fabs:
      case Intrinsic::minnum:
      case Intrinsic::maxnum:
        return true;
      // These intrinsics are defined to have the same behavior as libm
      // functions, which never overflow when operating on the IEEE754 types
      // that we support, and never set errno otherwise.
      case Intrinsic::ceil:
      case Intrinsic::floor:
      case Intrinsic::nearbyint:
      case Intrinsic::rint:
      case Intrinsic::round:
        return true;
      // These intrinsics do not correspond to any libm function, and
      // do not set errno.
      case Intrinsic::powi:
        return true;
      // TODO: are convert_{from,to}_fp16 safe?
      // TODO: can we list target-specific intrinsics here?
      default:
        break;
      }
    }
    return false; // The called function could have undefined behavior or
                  // side-effects, even if marked readnone nounwind.
  }
  case Instruction::VAArg:
  case Instruction::Alloca:
  case Instruction::Invoke:
  case Instruction::PHI:
  case Instruction::Store:
  case Instruction::Ret:
  case Instruction::Br:
  case Instruction::IndirectBr:
  case Instruction::Switch:
  case Instruction::Unreachable:
  case Instruction::Fence:
  case Instruction::AtomicRMW:
  case Instruction::AtomicCmpXchg:
  case Instruction::LandingPad:
  case Instruction::Resume:
  case Instruction::CatchSwitch:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
  case Instruction::CleanupPad:
  case Instruction::CleanupRet:
    return false; // Misc instructions which have effects
  }
}

bool llvm::mayBeMemoryDependent(const Instruction &I) {
  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
}

/// Return true if we know that the specified value is never null.
bool llvm::isKnownNonNull(const Value *V) {
  assert(V->getType()->isPointerTy() && "V must be pointer type");

  // Alloca never returns null, malloc might.
  if (isa<AllocaInst>(V)) return true;

  // A byval, inalloca, or nonnull argument is never null.
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();

  // A global variable in address space 0 is non-null unless it is extern weak
  // or an absolute symbol reference. Other address spaces may have null as a
  // valid address for a global, so we can't assume anything.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
    return !GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
           GV->getType()->getAddressSpace() == 0;

  // A Load tagged with nonnull metadata is never null.
  if (const LoadInst *LI = dyn_cast<LoadInst>(V))
    return LI->getMetadata(LLVMContext::MD_nonnull);

  if (auto CS = ImmutableCallSite(V))
    if (CS.isReturnNonNull())
      return true;

  return false;
}

static bool isKnownNonNullFromDominatingCondition(const Value *V,
                                                  const Instruction *CtxI,
                                                  const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() && "V must be pointer type");
  assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
  assert(CtxI && "Context instruction required for analysis");
  assert(DT && "Dominator tree required for analysis");

  unsigned NumUsesExplored = 0;
  for (auto *U : V->users()) {
    // Avoid massive lists.
    if (NumUsesExplored >= DomConditionsMaxUses)
      break;
    NumUsesExplored++;

    // If the value is used as an argument to a call or invoke, then the
    // argument's attributes may provide an answer about null-ness.
    if (auto CS = ImmutableCallSite(U))
      if (auto *CalledFunc = CS.getCalledFunction())
        for (const Argument &Arg : CalledFunc->args())
          if (CS.getArgOperand(Arg.getArgNo()) == V &&
              Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
            return true;

    // Consider only compare instructions uniquely controlling a branch.
    CmpInst::Predicate Pred;
    if (!match(const_cast<User *>(U),
               m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
        (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
      continue;

    for (auto *CmpU : U->users()) {
      if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
        assert(BI->isConditional() && "uses a comparison!");

        BasicBlock *NonNullSuccessor =
            BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
        BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
        if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
          return true;
      } else if (Pred == ICmpInst::ICMP_NE &&
                 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
                 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
        return true;
      }
    }
  }

  return false;
}

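// Illustrative example (assumed IR, not from the original source): given
//
//   %c = icmp ne i8* %p, null
//   br i1 %c, label %nonnull, label %isnull
//
// %p is known non-null at any context instruction dominated by the edge into
// %nonnull, which is exactly the case the function above recognizes.
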
bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
                            const DominatorTree *DT) {
  if (isa<ConstantPointerNull>(V) || isa<UndefValue>(V))
    return false;

  if (isKnownNonNull(V))
    return true;

  if (!CtxI || !DT)
    return false;

  return ::isKnownNonNullFromDominatingCondition(V, CtxI, DT);
}

OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
                                                   const Value *RHS,
                                                   const DataLayout &DL,
                                                   AssumptionCache *AC,
                                                   const Instruction *CxtI,
                                                   const DominatorTree *DT) {
  // Multiplying n * m significant bits yields a result of n + m significant
  // bits. If the total number of significant bits does not exceed the
  // result bit width (minus 1), there is no overflow.
  // This means if we have enough leading zero bits in the operands
  // we can guarantee that the result does not overflow.
  // Ref: "Hacker's Delight" by Henry Warren
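  // Worked example (illustrative): with BitWidth == 8, if LHS is known to be
  // at most 15 (4 leading zero bits) and RHS is known to be at most 15 (4
  // leading zero bits), then ZeroBits = 4 + 4 >= 8; indeed 15 * 15 == 225
  // fits in 8 bits, so the multiply cannot overflow.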
  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
  APInt LHSKnownZero(BitWidth, 0);
  APInt LHSKnownOne(BitWidth, 0);
  APInt RHSKnownZero(BitWidth, 0);
  APInt RHSKnownOne(BitWidth, 0);
  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
                   DT);
  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
                   DT);
  // Note that underestimating the number of zero bits gives a more
  // conservative answer.
  unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
                      RHSKnownZero.countLeadingOnes();
  // First handle the easy case: if we have enough zero bits there's
  // definitely no overflow.
  if (ZeroBits >= BitWidth)
    return OverflowResult::NeverOverflows;

  // Get the largest possible values for each operand.
  APInt LHSMax = ~LHSKnownZero;
  APInt RHSMax = ~RHSKnownZero;

  // We know the multiply operation doesn't overflow if the maximum values for
  // each operand will not overflow after we multiply them together.
  bool MaxOverflow;
  LHSMax.umul_ov(RHSMax, MaxOverflow);
  if (!MaxOverflow)
    return OverflowResult::NeverOverflows;

  // We know it always overflows if multiplying the smallest possible values
  // for the operands also results in overflow.
  bool MinOverflow;
  LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
  if (MinOverflow)
    return OverflowResult::AlwaysOverflows;

  return OverflowResult::MayOverflow;
}

OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
                                                   const Value *RHS,
                                                   const DataLayout &DL,
                                                   AssumptionCache *AC,
                                                   const Instruction *CxtI,
                                                   const DominatorTree *DT) {
  bool LHSKnownNonNegative, LHSKnownNegative;
  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
                 AC, CxtI, DT);
  if (LHSKnownNonNegative || LHSKnownNegative) {
    bool RHSKnownNonNegative, RHSKnownNegative;
    ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
                   AC, CxtI, DT);

    if (LHSKnownNegative && RHSKnownNegative) {
      // The sign bit is set in both cases: this MUST overflow.
      return OverflowResult::AlwaysOverflows;
    }

    if (LHSKnownNonNegative && RHSKnownNonNegative) {
      // The sign bit is clear in both cases: this CANNOT overflow.
      return OverflowResult::NeverOverflows;
    }
  }

  return OverflowResult::MayOverflow;
}

static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                                  const Value *RHS,
                                                  const AddOperator *Add,
                                                  const DataLayout &DL,
                                                  AssumptionCache *AC,
                                                  const Instruction *CxtI,
                                                  const DominatorTree *DT) {
  if (Add && Add->hasNoSignedWrap()) {
    return OverflowResult::NeverOverflows;
  }

  bool LHSKnownNonNegative, LHSKnownNegative;
  bool RHSKnownNonNegative, RHSKnownNegative;
  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
                 AC, CxtI, DT);
  ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
                 AC, CxtI, DT);

  if ((LHSKnownNonNegative && RHSKnownNegative) ||
      (LHSKnownNegative && RHSKnownNonNegative)) {
    // The sign bits are opposite: this CANNOT overflow.
    return OverflowResult::NeverOverflows;
  }

  // The remaining code needs Add to be available. Return early if it is not.
  if (!Add)
    return OverflowResult::MayOverflow;

  // If the sign of Add is the same as at least one of the operands, this add
  // CANNOT overflow. This is particularly useful when the sum is
  // @llvm.assume'ed non-negative rather than proved so from analyzing its
  // operands.
  bool LHSOrRHSKnownNonNegative =
      (LHSKnownNonNegative || RHSKnownNonNegative);
  bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
    bool AddKnownNonNegative, AddKnownNegative;
    ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
                   /*Depth=*/0, AC, CxtI, DT);
    if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
        (AddKnownNegative && LHSOrRHSKnownNegative)) {
      return OverflowResult::NeverOverflows;
    }
  }

  return OverflowResult::MayOverflow;
}

bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
                                     const DominatorTree &DT) {
#ifndef NDEBUG
  auto IID = II->getIntrinsicID();
  assert((IID == Intrinsic::sadd_with_overflow ||
          IID == Intrinsic::uadd_with_overflow ||
          IID == Intrinsic::ssub_with_overflow ||
          IID == Intrinsic::usub_with_overflow ||
          IID == Intrinsic::smul_with_overflow ||
          IID == Intrinsic::umul_with_overflow) &&
         "Not an overflow intrinsic!");
#endif

  SmallVector<const BranchInst *, 2> GuardingBranches;
  SmallVector<const ExtractValueInst *, 2> Results;

  for (const User *U : II->users()) {
    if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");

      if (EVI->getIndices()[0] == 0)
        Results.push_back(EVI);
      else {
        assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");

        for (const auto *U : EVI->users())
          if (const auto *B = dyn_cast<BranchInst>(U)) {
            assert(B->isConditional() && "How else is it using an i1?");
            GuardingBranches.push_back(B);
          }
      }
    } else {
      // We are using the aggregate directly in a way we don't want to analyze
      // here (storing it to a global, say).
      return false;
    }
  }

  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
    BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
    if (!NoWrapEdge.isSingleEdge())
      return false;

    // Check if all users of the add are provably no-wrap.
    for (const auto *Result : Results) {
      // If the extractvalue itself is not executed on overflow, then we don't
      // need to check each use separately, since domination is transitive.
      if (DT.dominates(NoWrapEdge, Result->getParent()))
        continue;

      for (auto &RU : Result->uses())
        if (!DT.dominates(NoWrapEdge, RU))
          return false;
    }

    return true;
  };

  return any_of(GuardingBranches, AllUsesGuardedByBranch);
}

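// Illustrative example (assumed IR, not from the original source): the guard
// pattern recognized above looks like
//
//   %s  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %v  = extractvalue { i32, i1 } %s, 0
//   %ov = extractvalue { i32, i1 } %s, 1
//   br i1 %ov, label %trap, label %cont
//
// Every use of %v reached only via the edge into %cont is guaranteed not to
// have wrapped.
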
OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
                                       Add, DL, AC, CxtI, DT);
}

OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
  // A memory operation returns normally if it isn't volatile. A volatile
  // operation is allowed to trap.
  //
  // An atomic operation isn't guaranteed to return in a reasonable amount of
  // time because it's possible for another thread to interfere with it for an
  // arbitrary length of time, but programs aren't allowed to rely on that.
  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
    return !LI->isVolatile();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I))
    return !SI->isVolatile();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
    return !CXI->isVolatile();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
    return !RMWI->isVolatile();
  if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
    return !MII->isVolatile();

  // If there is no successor, then execution can't transfer to it.
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
    return !CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
    return !CatchSwitch->unwindsToCaller();
  if (isa<ResumeInst>(I))
    return false;
  if (isa<ReturnInst>(I))
    return false;
  if (isa<UnreachableInst>(I))
    return false;

  // Calls can throw, or contain an infinite loop, or kill the process.
  if (auto CS = ImmutableCallSite(I)) {
    // Call sites that throw have implicit non-local control flow.
    if (!CS.doesNotThrow())
      return false;

    // Non-throwing call sites can loop infinitely, call exit/pthread_exit
    // etc. and thus not return. However, LLVM already assumes that
    //
    //  - Thread exiting actions are modeled as writes to memory invisible to
    //    the program.
    //
    //  - Loops that don't have side effects (side effects are volatile/atomic
    //    stores and IO) always terminate (see http://llvm.org/PR965).
    //    Furthermore, IO itself is also modeled as writes to memory invisible
    //    to the program.
    //
    // We rely on those assumptions here, and use the memory effects of the
    // call target as a proxy for checking that it always returns.
    //
    // FIXME: This isn't aggressive enough; a call which only writes to a
    // global is guaranteed to return.
    return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
           match(I, m_Intrinsic<Intrinsic::assume>());
  }
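  // Illustrative example: under the proxy above, a call to a nounwind
  // function marked readonly or argmemonly is treated as always returning,
  // while a plain external call may loop forever or call exit() and so is
  // not guaranteed to transfer execution to its successor.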
  // Other instructions return normally.
  return true;
}

bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                                  const Loop *L) {
  // The loop header is guaranteed to be executed for every iteration.
  //
  // FIXME: Relax this constraint to cover all basic blocks that are
  // guaranteed to be executed at every iteration.
  if (I->getParent() != L->getHeader()) return false;

  for (const Instruction &LI : *L->getHeader()) {
    if (&LI == I) return true;
    if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
  }
  llvm_unreachable("Instruction not contained in its own parent basic block.");
}

bool llvm::propagatesFullPoison(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Xor:
  case Instruction::Trunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::GetElementPtr:
    // These operations all propagate poison unconditionally. Note that poison
    // is not any particular value, so xor or subtraction of poison with
    // itself still yields poison, not zero.
    return true;

  case Instruction::AShr:
  case Instruction::SExt:
    // For these operations, one bit of the input is replicated across
    // multiple output bits. A replicated poison bit is still poison.
    return true;

  case Instruction::ICmp:
    // Comparing poison with any value yields poison. This is why, for
    // instance, x s< (x +nsw 1) can be folded to true.
    return true;

  default:
    return false;
  }
}

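// Illustrative example (assumed IR, not from the original source): in
//
//   %a = add nsw i32 %x, 1
//   %c = icmp slt i32 %x, %a
//
// %c folds to true: if the add wraps it yields poison, and because icmp
// propagates full poison the folded result is still justified.
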
const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Store:
    return cast<StoreInst>(I)->getPointerOperand();

  case Instruction::Load:
    return cast<LoadInst>(I)->getPointerOperand();

  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->getPointerOperand();

  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->getPointerOperand();

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    return I->getOperand(1);

  default:
    return nullptr;
  }
}

bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
  // We currently only look for uses of poison values within the same basic
  // block, as that makes it easier to guarantee that the uses will be
  // executed given that PoisonI is executed.
  //
  // FIXME: Expand this to consider uses beyond the same basic block. To do
  // this, look out for the distinction between post-dominance and strong
  // post-dominance.
  const BasicBlock *BB = PoisonI->getParent();

  // Set of instructions that we have proved will yield poison if PoisonI
  // does.
  SmallSet<const Value *, 16> YieldsPoison;
  SmallSet<const BasicBlock *, 4> Visited;
  YieldsPoison.insert(PoisonI);
  Visited.insert(PoisonI->getParent());

  BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();

  unsigned Iter = 0;
  while (Iter++ < MaxDepth) {
    for (auto &I : make_range(Begin, End)) {
      if (&I != PoisonI) {
        const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
        if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
          return true;
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          return false;
      }

      // Mark poison that propagates from I through uses of I.
      if (YieldsPoison.count(&I)) {
        for (const User *User : I.users()) {
          const Instruction *UserI = cast<Instruction>(User);
          if (propagatesFullPoison(UserI))
            YieldsPoison.insert(User);
        }
      }
    }

    if (auto *NextBB = BB->getSingleSuccessor()) {
      if (Visited.insert(NextBB).second) {
        BB = NextBB;
        Begin = BB->getFirstNonPHI()->getIterator();
        End = BB->end();
        continue;
      }
    }

    break;
  }
  return false;
}

static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
  if (FMF.noNaNs())
    return true;

  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isNaN();
  return false;
}

static bool isKnownNonZero(const Value *V) {
  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isZero();
  return false;
}

/// Match non-obvious integer minimum and maximum sequences.
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
                                       Value *CmpLHS, Value *CmpRHS,
                                       Value *TrueVal, Value *FalseVal,
                                       Value *&LHS, Value *&RHS) {
  // Assume success. If there's no match, callers should not use these anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  // Recognize variations of:
  //   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
  const APInt *C1;
  if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
    const APInt *C2;

    // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
    if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
      return {SPF_SMAX, SPNB_NA, false};

    // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
    if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
      return {SPF_SMIN, SPNB_NA, false};

    // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
    if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
      return {SPF_UMAX, SPNB_NA, false};

    // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
    if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
      return {SPF_UMIN, SPNB_NA, false};
  }
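  // Illustrative example: the C expression
  //
  //   x < 0 ? 0 : (x > 255 ? 255 : x)
  //
  // is a byte clamp; once the inner leg has been canonicalized to an SMIN
  // form, it can reach this code as (X <s 0) ? 0 : SMIN(X, 255) and is
  // recognized as SMAX(SMIN(X, 255), 0) by the first pattern above.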
  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // With Z = X -nsw Y:
  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
  if (match(TrueVal, m_Zero()) &&
      match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // With Z = X -nsw Y:
  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
  if (match(FalseVal, m_Zero()) &&
      match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  if (!match(CmpRHS, m_APInt(C1)))
    return {SPF_UNKNOWN, SPNB_NA, false};
  // An unsigned min/max can be written with a signed compare.
  const APInt *C2;
  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
      (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
    // Is the sign bit set?
    // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
    // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
    if (Pred == CmpInst::ICMP_SLT && *C1 == 0 && C2->isMaxSignedValue())
      return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};

    // Is the sign bit clear?
    // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
    // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
    if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
        C2->isMinSignedValue())
      return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
  }

  // Look through 'not' ops to find disguised signed min/max.
  // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
  // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
  if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
      match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
  // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
  if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
      match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  return {SPF_UNKNOWN, SPNB_NA, false};
}

static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS) {
  LHS = CmpLHS;
  RHS = CmpRHS;

  // If the predicate is an "or-equal" (FP) predicate, then signed zeroes may
  // return inconsistent results between implementations.
  //   (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
  //   minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // Therefore we behave conservatively and only proceed if at least one of the
  // operands is known to not be zero, or if we don't care about signed zeroes.
  switch (Pred) {
  default: break;
  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
    if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
        !isKnownNonZero(CmpRHS))
      return {SPF_UNKNOWN, SPNB_NA, false};
  }

  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
  bool Ordered = false;

  // When given one NaN and one non-NaN input:
  //  - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
  //  - A simple C99 (a < b ? a : b) construction will return 'b' (as the
  //    ordered comparison fails), which could be NaN or non-NaN.
  // So here we discover exactly what NaN behavior is required/accepted.
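  // Illustrative example: in "(a < b) ? a : b" (FCMP_OLT), if only 'a' is
  // known non-NaN, then a NaN 'b' makes the ordered comparison false and the
  // select returns 'b'; the pattern therefore returns the NaN input
  // (SPNB_RETURNS_NAN below).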
  if (CmpInst::isFPPredicate(Pred)) {
    bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
    bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);

    if (LHSSafe && RHSSafe) {
      // Both operands are known non-NaN.
      NaNBehavior = SPNB_RETURNS_ANY;
    } else if (CmpInst::isOrdered(Pred)) {
      // An ordered comparison will return false when given a NaN, so it
      // returns the RHS.
      Ordered = true;
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
        NaNBehavior = SPNB_RETURNS_NAN;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_OTHER;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    } else {
      Ordered = false;
      // An unordered comparison will return true when given a NaN, so it
      // returns the LHS.
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
        NaNBehavior = SPNB_RETURNS_OTHER;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_NAN;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    }
  }

  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
    std::swap(CmpLHS, CmpRHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
    if (NaNBehavior == SPNB_RETURNS_NAN)
      NaNBehavior = SPNB_RETURNS_OTHER;
    else if (NaNBehavior == SPNB_RETURNS_OTHER)
      NaNBehavior = SPNB_RETURNS_NAN;
    Ordered = !Ordered;
  }

  // ([if]cmp X, Y) ? X : Y
  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
    switch (Pred) {
    default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
    case FCmpInst::FCMP_UGT:
    case FCmpInst::FCMP_UGE:
    case FCmpInst::FCMP_OGT:
    case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
    case FCmpInst::FCMP_ULT:
    case FCmpInst::FCMP_ULE:
    case FCmpInst::FCMP_OLT:
    case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
    }
  }

  const APInt *C1;
  if (match(CmpRHS, m_APInt(C1))) {
    if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
        (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {

      // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
      // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
      if (Pred == ICmpInst::ICMP_SGT && (*C1 == 0 || C1->isAllOnesValue())) {
        return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
      }

      // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
      // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
      if (Pred == ICmpInst::ICMP_SLT && (*C1 == 0 || *C1 == 1)) {
        return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
      }
    }
  }

  return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}

static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  auto *Cast1 = dyn_cast<CastInst>(V1);
  if (!Cast1)
    return nullptr;

  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
  Constant *CastedBack =
      ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}

SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp) {
  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS);
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS);
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS);
}

/// Return true if "icmp Pred LHS RHS" is always true.
static bool isTruePredicate(CmpInst::Predicate Pred,
                            const Value *LHS, const Value *RHS,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        unsigned BitWidth = CA->getBitWidth();
        APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
        computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT);

        if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB)
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}

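// Worked example (illustrative): if X is known to have its two low bits zero,
// then (X | 1) == X +_{nuw} 1 and (X | 3) == X +_{nuw} 3, so for
// LHS = (X | 1) and RHS = (X | 3) the lambda above matches with CA = 1 and
// CB = 3, and 1 u<= 3 proves LHS u<= RHS.
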
/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return None.
static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS,
                      const Value *BRHS, const DataLayout &DL,
                      unsigned Depth, AssumptionCache *AC,
                      const Instruction *CxtI, const DominatorTree *DT) {
  switch (Pred) {
  default:
    return None;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
                        DT) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
      return true;
    return None;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
                        DT) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
      return true;
    return None;
  }
}

/// Return true if the operands of the two compares match. IsSwappedOps is true
/// when the operands match, but are swapped.
static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
                          const Value *BLHS, const Value *BRHS,
                          bool &IsSwappedOps) {
  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
  return IsMatchingOps || IsSwappedOps;
}

/// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
/// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
/// BRHS" is false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
                                                    const Value *ALHS,
                                                    const Value *ARHS,
                                                    CmpInst::Predicate BPred,
                                                    const Value *BLHS,
                                                    const Value *BRHS,
                                                    bool IsSwappedOps) {
  // Canonicalize the operands so they're matching.
  if (IsSwappedOps) {
    std::swap(BLHS, BRHS);
    BPred = ICmpInst::getSwappedPredicate(BPred);
  }
  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}

/// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
/// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
/// C2" is false. Otherwise, return None if we can't infer anything.
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const Value *BLHS, const ConstantInt *C2) {
  assert(ALHS == BLHS && "LHS operands must match.");
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;

  return None;
}

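// Worked example (illustrative): if the dominating compare is "x u< 8"
// (exact region [0, 8)) and the second compare is "x u< 10" (allowed region
// [0, 10)), the difference [0, 8) \ [0, 10) is empty, so the first compare
// being true implies the second is true.
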
Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool InvertAPred,
                                        unsigned Depth, AssumptionCache *AC,
                                        const Instruction *CxtI,
                                        const DominatorTree *DT) {
  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (LHS->getType() != RHS->getType())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->getScalarType()->isIntegerTy(1));

  // LHS ==> RHS by definition
  if (!InvertAPred && LHS == RHS)
    return true;

  if (OpTy->isVectorTy())
    // TODO: extending the code below to handle vectors
    return None;
  assert(OpTy->isIntegerTy(1) && "implied by above");

  ICmpInst::Predicate APred, BPred;
  Value *ALHS, *ARHS;
  Value *BLHS, *BRHS;

  if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
      !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
    return None;

  if (InvertAPred)
    APred = CmpInst::getInversePredicate(APred);
  // Can we infer anything when the two compares have matching operands?
  bool IsSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands are
  // constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
            cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }
4448 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,