1 //===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the interface for lazy computation of value constraint
13 //===----------------------------------------------------------------------===//
15 #include "llvm/Analysis/LazyValueInfo.h"
16 #include "llvm/ADT/DenseSet.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/Analysis/AssumptionCache.h"
19 #include "llvm/Analysis/ConstantFolding.h"
20 #include "llvm/Analysis/TargetLibraryInfo.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/IR/CFG.h"
23 #include "llvm/IR/ConstantRange.h"
24 #include "llvm/IR/Constants.h"
25 #include "llvm/IR/DataLayout.h"
26 #include "llvm/IR/Dominators.h"
27 #include "llvm/IR/Instructions.h"
28 #include "llvm/IR/IntrinsicInst.h"
29 #include "llvm/IR/LLVMContext.h"
30 #include "llvm/IR/PatternMatch.h"
31 #include "llvm/IR/ValueHandle.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/raw_ostream.h"
37 using namespace PatternMatch;
39 #define DEBUG_TYPE "lazy-value-info"
// Legacy pass-manager registration: declares the pass ID and registers
// LazyValueInfoWrapperPass under the "lazy-value-info" name, declaring
// AssumptionCacheTracker and TargetLibraryInfoWrapperPass as dependencies.
char LazyValueInfoWrapperPass::ID = 0;
INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
                "Lazy Value Information Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
                "Lazy Value Information Analysis", false, true)

// Factory clients call to construct the legacy wrapper pass.
FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); }

// Unique address used as the new-pass-manager analysis ID.
char LazyValueAnalysis::PassID;
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//

/// This is the information tracked by LazyValueInfo for each value.
/// FIXME: This is basically just for bringup, this can be made a lot more rich

/// This Value has no known value yet. As a result, this implies the
/// producing instruction is dead. Caution: We use this as the starting
/// state in our local meet rules. In this usage, it's taken to mean
/// "nothing known yet".

/// This Value has a specific constant value. (For integers, constantrange

/// This Value is known to not have the specified value. (For integers,
/// constantrange is used instead.)

/// The Value falls within this range. (Used only for integer typed values.)

/// We can not precisely model the dynamic values this value might take.

/// Val: This stores the current lattice value along with the Constant* for
/// the constant if this is a 'constant' or 'notconstant' value.

// Default state: 'undefined' (bottom of the lattice), no constant payload,
// and a placeholder 1-bit full range.
LVILatticeVal() : Tag(undefined), Val(nullptr), Range(1, true) {}

/// Build a lattice value for the exact constant C. Undef is deliberately
/// not treated as a concrete constant.
static LVILatticeVal get(Constant *C) {
  if (!isa<UndefValue>(C))

/// Build a lattice value meaning "any value except C".
static LVILatticeVal getNot(Constant *C) {
  if (!isa<UndefValue>(C))
    Res.markNotConstant(C);

/// Build a lattice value covering the integer range CR.
static LVILatticeVal getRange(ConstantRange CR) {
  Res.markConstantRange(std::move(CR));

/// Build the top element of the lattice ("no useful facts known").
static LVILatticeVal getOverdefined() {
  Res.markOverdefined();

// Tag queries, one per lattice state.
bool isUndefined() const { return Tag == undefined; }
bool isConstant() const { return Tag == constant; }
bool isNotConstant() const { return Tag == notconstant; }
bool isConstantRange() const { return Tag == constantrange; }
bool isOverdefined() const { return Tag == overdefined; }

Constant *getConstant() const {
  assert(isConstant() && "Cannot get the constant of a non-constant!");

Constant *getNotConstant() const {
  assert(isNotConstant() && "Cannot get the constant of a non-notconstant!");

ConstantRange getConstantRange() const {
  assert(isConstantRange() &&
         "Cannot get the constant-range of a non-constant-range!");

/// Return true if this is a change in status.
bool markOverdefined() {

/// Return true if this is a change in status.
bool markConstant(Constant *V) {
  assert(V && "Marking constant with NULL");
  // Integer constants are canonicalized into single-element ranges.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return markConstantRange(ConstantRange(CI->getValue()));
  if (isa<UndefValue>(V))

  assert((!isConstant() || getConstant() == V) &&
         "Marking constant with different value");
  assert(isUndefined());

/// Return true if this is a change in status.
bool markNotConstant(Constant *V) {
  assert(V && "Marking constant with NULL");
  // "Not CI" for an integer is the inverted single-element range
  // [CI+1, CI), i.e. everything except CI.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return markConstantRange(ConstantRange(CI->getValue()+1, CI->getValue()));
  if (isa<UndefValue>(V))

  assert((!isConstant() || getConstant() != V) &&
         "Marking constant !constant with same value");
  assert((!isNotConstant() || getNotConstant() == V) &&
         "Marking !constant with different value");
  assert(isUndefined() || isConstant());

/// Return true if this is a change in status.
bool markConstantRange(ConstantRange NewR) {
  if (isConstantRange()) {
    // An empty range is contradictory; conservatively go to overdefined.
    if (NewR.isEmptySet())
      return markOverdefined();

    bool changed = Range != NewR;
    Range = std::move(NewR);

  assert(isUndefined());
  if (NewR.isEmptySet())
    return markOverdefined();

  Range = std::move(NewR);

/// Merge the specified lattice value into this one, updating this
/// one and returning true if anything changed.
bool mergeIn(const LVILatticeVal &RHS, const DataLayout &DL) {
  // Undefined is the identity for the merge; overdefined absorbs everything.
  if (RHS.isUndefined() || isOverdefined()) return false;
  if (RHS.isOverdefined()) return markOverdefined();

  if (RHS.isConstant()) {
    return markOverdefined();

  if (RHS.isNotConstant()) {
    return markOverdefined();

  // Unless we can prove that the two Constants are different, we must
  // move to overdefined.
  if (ConstantInt *Res =
          dyn_cast<ConstantInt>(ConstantFoldCompareInstOperands(
              CmpInst::ICMP_NE, getConstant(), RHS.getNotConstant(), DL)))
    return markNotConstant(RHS.getNotConstant());

  return markOverdefined();
  return markOverdefined();

  if (isNotConstant()) {
    if (RHS.isConstant()) {
      return markOverdefined();

      // Unless we can prove that the two Constants are different, we must
      // move to overdefined.
      if (ConstantInt *Res =
              dyn_cast<ConstantInt>(ConstantFoldCompareInstOperands(
                  CmpInst::ICMP_NE, getNotConstant(), RHS.getConstant(), DL)))
      return markOverdefined();

    if (RHS.isNotConstant()) {
      return markOverdefined();

    return markOverdefined();

  // Both sides are ranges: union them, collapsing a full set to overdefined.
  assert(isConstantRange() && "New LVILattice type?");
  if (!RHS.isConstantRange())
    return markOverdefined();

  ConstantRange NewR = Range.unionWith(RHS.getConstantRange());
  if (NewR.isFullSet())
    return markOverdefined();
  return markConstantRange(NewR);
} // end anonymous namespace.
raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val)

/// Debug printer for LVILatticeVal; emits one of "undefined", "overdefined",
/// "notconstant<...>", "constantrange<lo, hi>" or "constant<...>".
raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val) {
  if (Val.isUndefined())
    return OS << "undefined";
  if (Val.isOverdefined())
    return OS << "overdefined";

  if (Val.isNotConstant())
    return OS << "notconstant<" << *Val.getNotConstant() << '>';
  if (Val.isConstantRange())
    return OS << "constantrange<" << Val.getConstantRange().getLower() << ", "
              << Val.getConstantRange().getUpper() << '>';
  // Only remaining state is 'constant'.
  return OS << "constant<" << *Val.getConstant() << '>';
/// Returns true if this lattice value represents at most one possible value.
/// This is as precise as any lattice value can get while still representing
static bool hasSingleValue(const LVILatticeVal &Val) {
  if (Val.isConstantRange() &&
      Val.getConstantRange().isSingleElement())
    // Integer constants are single element ranges
  if (Val.isConstant())
    // Non integer constants
/// Combine two sets of facts about the same value into a single set of
/// facts. Note that this method is not suitable for merging facts along
/// different paths in a CFG; that's what the mergeIn function is for. This
/// is for merging facts gathered about the same value at the same location
/// through two independent means.
///
/// * This method does not promise to return the most precise possible lattice
/// value implied by A and B. It is allowed to return any lattice element
/// which is at least as strong as *either* A or B (unless our facts
/// conflict, see below).
/// * Due to unreachable code, the intersection of two lattice values could be
/// contradictory. If this happens, we return some valid lattice value so as
/// not confuse the rest of LVI. Ideally, we'd always return Undefined, but
/// we do not make this guarantee. TODO: This would be a useful enhancement.
static LVILatticeVal intersect(LVILatticeVal A, LVILatticeVal B) {
  // Undefined is the strongest state. It means the value is known to be along
  // an unreachable path.

  // If we gave up for one, but got a useable fact from the other, use it.
  if (A.isOverdefined())
  if (B.isOverdefined())

  // Can't get any more precise than constants.
  if (hasSingleValue(A))
  if (hasSingleValue(B))

  // Could be either constant range or not constant here.
  if (!A.isConstantRange() || !B.isConstantRange()) {
    // TODO: Arbitrary choice, could be improved

  // Intersect two constant ranges
  ConstantRange Range =
      A.getConstantRange().intersectWith(B.getConstantRange());
  // Note: An empty range is implicitly converted to overdefined internally.
  // TODO: We could instead use Undefined here since we've proven a conflict
  // and thus know this path must be unreachable.
  return LVILatticeVal::getRange(std::move(Range));
//===----------------------------------------------------------------------===//
// LazyValueInfoCache Decl
//===----------------------------------------------------------------------===//

/// A callback value handle updates the cache when values are erased.
class LazyValueInfoCache;
struct LVIValueHandle final : public CallbackVH {
  // Owning cache; notified via deleted()/allUsesReplacedWith() when the
  // tracked Value goes away so stale cache entries can be purged.
  LazyValueInfoCache *Parent;

  LVIValueHandle(Value *V, LazyValueInfoCache *P)
    : CallbackVH(V), Parent(P) { }

  void deleted() override;
  void allUsesReplacedWith(Value *V) override {

/// This is the cache kept by LazyValueInfo which
/// maintains information about queries across the clients' queries.
class LazyValueInfoCache {
  /// This is all of the cached block information for exactly one Value*.
  /// The entries are sorted by the BasicBlock* of the
  /// entries, allowing us to do a lookup with a binary search.
  /// Over-defined lattice values are recorded in OverDefinedCache to reduce
  typedef SmallDenseMap<AssertingVH<BasicBlock>, LVILatticeVal, 4>

  /// This is all of the cached information for all values,
  /// mapped from Value* to key information.
  std::map<LVIValueHandle, ValueCacheEntryTy> ValueCache;

  /// This tracks, on a per-block basis, the set of values that are
  /// over-defined at the end of that block.
  typedef DenseMap<AssertingVH<BasicBlock>, SmallPtrSet<Value *, 4>>
  OverDefinedCacheTy OverDefinedCache;

  /// Keep track of all blocks that we have ever seen, so we
  /// don't spend time removing unused blocks from our caches.
  DenseSet<AssertingVH<BasicBlock> > SeenBlocks;

  /// This stack holds the state of the value solver during a query.
  /// It basically emulates the callstack of the naive
  /// recursive value lookup process.
  std::stack<std::pair<BasicBlock*, Value*> > BlockValueStack;

  /// Keeps track of which block-value pairs are in BlockValueStack.
  DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;

  /// Push BV onto BlockValueStack unless it's already in there.
  /// Returns true on success.
  bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
    if (!BlockValueSet.insert(BV).second)
      return false; // It's already in the stack.

    DEBUG(dbgs() << "PUSH: " << *BV.second << " in " << BV.first->getName()
    BlockValueStack.push(BV);

  AssumptionCache *AC;  ///< A pointer to the cache of @llvm.assume calls.
  const DataLayout &DL; ///< A mandatory DataLayout
  DominatorTree *DT;    ///< An optional DT pointer.

  friend struct LVIValueHandle;

  // Record a solved lattice value for (Val, BB) in the appropriate cache.
  void insertResult(Value *Val, BasicBlock *BB, const LVILatticeVal &Result) {
    SeenBlocks.insert(BB);

    // Insert over-defined values into their own cache to reduce memory
    if (Result.isOverdefined())
      OverDefinedCache[BB].insert(Val);
      lookup(Val)[BB] = Result;

  LVILatticeVal getBlockValue(Value *Val, BasicBlock *BB);
  bool getEdgeValue(Value *V, BasicBlock *F, BasicBlock *T,
                    LVILatticeVal &Result, Instruction *CxtI = nullptr);
  bool hasBlockValue(Value *Val, BasicBlock *BB);

  // These methods process one work item and may add more. A false value
  // returned means that the work item was not completely processed and must
  // be revisited after going through the new items.
  bool solveBlockValue(Value *Val, BasicBlock *BB);
  bool solveBlockValueNonLocal(LVILatticeVal &BBLV, Value *Val, BasicBlock *BB);
  bool solveBlockValuePHINode(LVILatticeVal &BBLV, PHINode *PN, BasicBlock *BB);
  bool solveBlockValueSelect(LVILatticeVal &BBLV, SelectInst *S,
  bool solveBlockValueBinaryOp(LVILatticeVal &BBLV, Instruction *BBI,
  bool solveBlockValueCast(LVILatticeVal &BBLV, Instruction *BBI,
  void intersectAssumeBlockValueConstantRange(Value *Val, LVILatticeVal &BBLV,

  // Per-value cache accessor; creates the map entry on first use.
  ValueCacheEntryTy &lookup(Value *V) {
    return ValueCache[LVIValueHandle(V, this)];

  // Is (V, BB) recorded in the dedicated overdefined cache?
  bool isOverdefined(Value *V, BasicBlock *BB) const {
    auto ODI = OverDefinedCache.find(BB);

    if (ODI == OverDefinedCache.end())

    return ODI->second.count(V);

  bool hasCachedValueInfo(Value *V, BasicBlock *BB) {
    // Overdefined values are stored separately from the main value cache.
    if (isOverdefined(V, BB))

    LVIValueHandle ValHandle(V, this);
    auto I = ValueCache.find(ValHandle);
    if (I == ValueCache.end())

    return I->second.count(BB);

  LVILatticeVal getCachedValueInfo(Value *V, BasicBlock *BB) {
    if (isOverdefined(V, BB))
      return LVILatticeVal::getOverdefined();

    return lookup(V)[BB];

  /// This is the query interface to determine the lattice
  /// value for the specified Value* at the end of the specified block.
  LVILatticeVal getValueInBlock(Value *V, BasicBlock *BB,
                                Instruction *CxtI = nullptr);

  /// This is the query interface to determine the lattice
  /// value for the specified Value* at the specified instruction (generally
  /// from an assume intrinsic).
  LVILatticeVal getValueAt(Value *V, Instruction *CxtI);

  /// This is the query interface to determine the lattice
  /// value for the specified Value* that is true on the specified edge.
  LVILatticeVal getValueOnEdge(Value *V, BasicBlock *FromBB,BasicBlock *ToBB,
                               Instruction *CxtI = nullptr);

  /// This is the update interface to inform the cache that an edge from
  /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
  void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);

  /// This is part of the update interface to inform the cache
  /// that a block has been deleted.
  void eraseBlock(BasicBlock *BB);

  /// clear - Empty the cache.
    OverDefinedCache.clear();

  LazyValueInfoCache(AssumptionCache *AC, const DataLayout &DL,
                     DominatorTree *DT = nullptr)
      : AC(AC), DL(DL), DT(DT) {}
} // end anonymous namespace
// CallbackVH hook: the tracked Value is being destroyed, so scrub every
// cache entry that mentions it.
void LVIValueHandle::deleted() {
  SmallVector<AssertingVH<BasicBlock>, 4> ToErase;
  for (auto &I : Parent->OverDefinedCache) {
    SmallPtrSetImpl<Value *> &ValueSet = I.second;
    if (ValueSet.count(getValPtr()))
      ValueSet.erase(getValPtr());
    // Collect now-empty per-block sets; erase them after the loop so we
    // don't invalidate the iterator mid-walk.
    if (ValueSet.empty())
      ToErase.push_back(I.first);

  for (auto &BB : ToErase)
    Parent->OverDefinedCache.erase(BB);

  // This erasure deallocates *this, so it MUST happen after we're done
  // using any and all members of *this.
  Parent->ValueCache.erase(*this);
// Update-interface hook: forget all cached facts for a deleted block.
void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
  // Shortcut if we have never seen this block.
  DenseSet<AssertingVH<BasicBlock> >::iterator I = SeenBlocks.find(BB);
  if (I == SeenBlocks.end())

  // Drop the per-block overdefined set, if any.
  auto ODI = OverDefinedCache.find(BB);
  if (ODI != OverDefinedCache.end())
    OverDefinedCache.erase(ODI);

  // Walk every per-value map to scrub this block's entries.
  for (auto &I : ValueCache)
// Worklist driver: repeatedly processes the top block/value pair until the
// stack drains. solveBlockValue returning true means the item is finished
// and cached; false means dependencies were pushed and it will be revisited.
void LazyValueInfoCache::solve() {
  while (!BlockValueStack.empty()) {
    std::pair<BasicBlock*, Value*> &e = BlockValueStack.top();
    assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");

    if (solveBlockValue(e.second, e.first)) {
      // The work item was completely processed.
      assert(BlockValueStack.top() == e && "Nothing should have been pushed!");
      assert(hasCachedValueInfo(e.second, e.first) &&
             "Result should be in cache!");

      DEBUG(dbgs() << "POP " << *e.second << " in " << e.first->getName()
                   << " = " << getCachedValueInfo(e.second, e.first) << "\n");

      BlockValueStack.pop();
      BlockValueSet.erase(e);
      // More work needs to be done before revisiting.
      assert(BlockValueStack.top() != e && "Stack should have been pushed!");
// Whether a lattice value for (Val, BB) is already available without solving.
bool LazyValueInfoCache::hasBlockValue(Value *Val, BasicBlock *BB) {
  // If already a constant, there is nothing to compute.
  if (isa<Constant>(Val))

  return hasCachedValueInfo(Val, BB);

// Fetch the lattice value for (Val, BB); constants are answered directly.
LVILatticeVal LazyValueInfoCache::getBlockValue(Value *Val, BasicBlock *BB) {
  // If already a constant, there is nothing to compute.
  if (Constant *VC = dyn_cast<Constant>(Val))
    return LVILatticeVal::get(VC);

  SeenBlocks.insert(BB);
  return getCachedValueInfo(Val, BB);
// Seed a lattice value from !range metadata on integer-typed loads, calls
// and invokes; anything else yields overdefined so it intersects harmlessly
// with facts derived elsewhere.
static LVILatticeVal getFromRangeMetadata(Instruction *BBI) {
  switch (BBI->getOpcode()) {
  case Instruction::Load:
  case Instruction::Call:
  case Instruction::Invoke:
    if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
      if (isa<IntegerType>(BBI->getType())) {
        return LVILatticeVal::getRange(getConstantRangeFromMetadata(*Ranges));

  // Nothing known - will be intersected with other facts
  return LVILatticeVal::getOverdefined();
// Dispatch solving of one (Val, BB) work item to the appropriate transfer
// rule based on the defining instruction's kind. Returns false when more
// dependencies were pushed and this item must be revisited.
bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
  if (isa<Constant>(Val))

  if (hasCachedValueInfo(Val, BB)) {
    // If we have a cached value, use that.
    DEBUG(dbgs() << " reuse BB '" << BB->getName()
                 << "' val=" << getCachedValueInfo(Val, BB) << '\n');

    // Since we're reusing a cached value, we don't need to update the
    // OverDefinedCache. The cache will have been properly updated whenever the
    // cached value was inserted.

  // Hold off inserting this value into the Cache in case we have to return
  // false and come back later.

  // Values defined outside BB (or non-instructions) use the non-local rule.
  Instruction *BBI = dyn_cast<Instruction>(Val);
  if (!BBI || BBI->getParent() != BB) {
    if (!solveBlockValueNonLocal(Res, Val, BB))
    insertResult(Val, BB, Res);

  if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
    if (!solveBlockValuePHINode(Res, PN, BB))
    insertResult(Val, BB, Res);

  if (auto *SI = dyn_cast<SelectInst>(BBI)) {
    if (!solveBlockValueSelect(Res, SI, BB))
    insertResult(Val, BB, Res);

  // If this value is a nonnull pointer, record it's range and bailout. Note
  // that for all other pointer typed values, we terminate the search at the
  // definition. We could easily extend this to look through geps, bitcasts,
  // and the like to prove non-nullness, but it's not clear that's worth it
  // compile time wise. The context-insensitive value walk done inside
  // isKnownNonNull gets most of the profitable cases at much less expense.
  // This does mean that we have a sensitivity to where the defining
  // instruction is placed, even if it could legally be hoisted much higher.
  // That is unfortunate.
  PointerType *PT = dyn_cast<PointerType>(BBI->getType());
  if (PT && isKnownNonNull(BBI)) {
    Res = LVILatticeVal::getNot(ConstantPointerNull::get(PT));
    insertResult(Val, BB, Res);

  if (BBI->getType()->isIntegerTy()) {
    if (isa<CastInst>(BBI)) {
      if (!solveBlockValueCast(Res, BBI, BB))
      insertResult(Val, BB, Res);

    BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
    if (BO && isa<ConstantInt>(BO->getOperand(1))) {
      if (!solveBlockValueBinaryOp(Res, BBI, BB))
      insertResult(Val, BB, Res);

  DEBUG(dbgs() << " compute BB '" << BB->getName()
               << "' - unknown inst def found.\n");
  // Unknown instruction kind: fall back to whatever !range metadata says.
  Res = getFromRangeMetadata(BBI);
  insertResult(Val, BB, Res);
// Returns true if instruction I accesses memory whose underlying object is
// Ptr (address space 0 only) — which means executing I would have faulted
// were Ptr null.
static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
  if (LoadInst *L = dyn_cast<LoadInst>(I)) {
    return L->getPointerAddressSpace() == 0 &&
           GetUnderlyingObject(L->getPointerOperand(),
                               L->getModule()->getDataLayout()) == Ptr;

  if (StoreInst *S = dyn_cast<StoreInst>(I)) {
    return S->getPointerAddressSpace() == 0 &&
           GetUnderlyingObject(S->getPointerOperand(),
                               S->getModule()->getDataLayout()) == Ptr;

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
    // A volatile mem intrinsic proves nothing about the pointer.
    if (MI->isVolatile()) return false;

    // FIXME: check whether it has a valuerange that excludes zero?
    ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
    if (!Len || Len->isZero()) return false;

    if (MI->getDestAddressSpace() == 0)
      if (GetUnderlyingObject(MI->getRawDest(),
                              MI->getModule()->getDataLayout()) == Ptr)
    // memcpy/memmove also dereference their source operand.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
      if (MTI->getSourceAddressSpace() == 0)
        if (GetUnderlyingObject(MTI->getRawSource(),
                                MTI->getModule()->getDataLayout()) == Ptr)
/// Return true if the allocation associated with Val is ever dereferenced
/// within the given basic block. This establishes the fact Val is not null,
/// but does not imply that the memory at Val is dereferenceable. (Val may
/// point off the end of the dereferenceable part of the object.)
static bool isObjectDereferencedInBlock(Value *Val, BasicBlock *BB) {
  assert(Val->getType()->isPointerTy());

  const DataLayout &DL = BB->getModule()->getDataLayout();
  Value *UnderlyingVal = GetUnderlyingObject(Val, DL);
  // If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
  // inside InstructionDereferencesPointer either.
  if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, DL, 1))
    for (Instruction &I : *BB)
      if (InstructionDereferencesPointer(&I, UnderlyingVal))
// Transfer rule for values not defined in BB: merge facts flowing in over
// every predecessor edge, with pointer-non-nullness as a fallback.
bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
                                                Value *Val, BasicBlock *BB) {
  LVILatticeVal Result;  // Start Undefined.

  // If this is the entry block, we must be asking about an argument. The
  // value is overdefined.
  if (BB == &BB->getParent()->getEntryBlock()) {
    assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
    // Before giving up, see if we can prove the pointer non-null local to
    // this particular block.
    if (Val->getType()->isPointerTy() &&
        (isKnownNonNull(Val) || isObjectDereferencedInBlock(Val, BB))) {
      PointerType *PTy = cast<PointerType>(Val->getType());
      Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
      Result.markOverdefined();

  // Loop over all of our predecessors, merging what we know from them into
  bool EdgesMissing = false;
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
    LVILatticeVal EdgeResult;
    EdgesMissing |= !getEdgeValue(Val, *PI, BB, EdgeResult);
    Result.mergeIn(EdgeResult, DL);

    // If we hit overdefined, exit early. The BlockVals entry is already set
    if (Result.isOverdefined()) {
      DEBUG(dbgs() << " compute BB '" << BB->getName()
                   << "' - overdefined because of pred (non local).\n");
      // Before giving up, see if we can prove the pointer non-null local to
      // this particular block.
      if (Val->getType()->isPointerTy() &&
          isObjectDereferencedInBlock(Val, BB)) {
        PointerType *PTy = cast<PointerType>(Val->getType());
        Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));

  // Return the merged value, which is more precise than 'overdefined'.
  assert(!Result.isOverdefined());
// Transfer rule for PHI nodes: merge the lattice value of each incoming
// value, evaluated along its incoming edge.
bool LazyValueInfoCache::solveBlockValuePHINode(LVILatticeVal &BBLV,
                                               PHINode *PN, BasicBlock *BB) {
  LVILatticeVal Result;  // Start Undefined.

  // Loop over all of our predecessors, merging what we know from them into
  bool EdgesMissing = false;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *PhiBB = PN->getIncomingBlock(i);
    Value *PhiVal = PN->getIncomingValue(i);
    LVILatticeVal EdgeResult;
    // Note that we can provide PN as the context value to getEdgeValue, even
    // though the results will be cached, because PN is the value being used as
    // the cache key in the caller.
    EdgesMissing |= !getEdgeValue(PhiVal, PhiBB, BB, EdgeResult, PN);
    Result.mergeIn(EdgeResult, DL);

    // If we hit overdefined, exit early. The BlockVals entry is already set
    if (Result.isOverdefined()) {
      DEBUG(dbgs() << " compute BB '" << BB->getName()
                   << "' - overdefined because of pred (local).\n");

  // Return the merged value, which is more precise than 'overdefined'.
  assert(!Result.isOverdefined() && "Possible PHI in entry block?");
// Forward declaration; used below by the assume-walk and by
// solveBlockValueSelect (which passes isTrueDest=false for the false arm).
static bool getValueFromFromCondition(Value *Val, ICmpInst *ICI,
                                      LVILatticeVal &Result,
                                      bool isTrueDest = true);

// If we can determine a constraint on the value given conditions assumed by
// the program, intersect those constraints with BBLV
void LazyValueInfoCache::intersectAssumeBlockValueConstantRange(Value *Val,
  // Default the context to Val's defining instruction when none was given.
  BBI = BBI ? BBI : dyn_cast<Instruction>(Val);

  // Walk all @llvm.assume calls whose condition is valid at BBI.
  for (auto &AssumeVH : AC->assumptions()) {
    auto *I = cast<CallInst>(AssumeVH);
    if (!isValidAssumeForContext(I, BBI, DT))

    Value *C = I->getArgOperand(0);
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(C)) {
      LVILatticeVal Result;
      if (getValueFromFromCondition(Val, ICI, Result))
        BBLV = intersect(BBLV, Result);
// Transfer rule for selects: solve both arms, recognize min/max and clamp
// idioms, refine each arm using the select condition, then merge.
bool LazyValueInfoCache::solveBlockValueSelect(LVILatticeVal &BBLV,
                                              SelectInst *SI, BasicBlock *BB) {

  // Recurse on our inputs if needed
  if (!hasBlockValue(SI->getTrueValue(), BB)) {
    if (pushBlockValue(std::make_pair(BB, SI->getTrueValue())))
    BBLV.markOverdefined();

  LVILatticeVal TrueVal = getBlockValue(SI->getTrueValue(), BB);
  // If we hit overdefined, don't ask more queries. We want to avoid poisoning
  // extra slots in the table if we can.
  if (TrueVal.isOverdefined()) {
    BBLV.markOverdefined();

  if (!hasBlockValue(SI->getFalseValue(), BB)) {
    if (pushBlockValue(std::make_pair(BB, SI->getFalseValue())))
    BBLV.markOverdefined();

  LVILatticeVal FalseVal = getBlockValue(SI->getFalseValue(), BB);
  // If we hit overdefined, don't ask more queries. We want to avoid poisoning
  // extra slots in the table if we can.
  if (FalseVal.isOverdefined()) {
    BBLV.markOverdefined();

  if (TrueVal.isConstantRange() && FalseVal.isConstantRange()) {
    ConstantRange TrueCR = TrueVal.getConstantRange();
    ConstantRange FalseCR = FalseVal.getConstantRange();
    Value *LHS = nullptr;
    Value *RHS = nullptr;
    SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
    // Is this a min specifically of our two inputs? (Avoid the risk of
    // ValueTracking getting smarter looking back past our immediate inputs.)
    if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
        LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) {
      switch (SPR.Flavor) {
        llvm_unreachable("unexpected minmax type!");
      case SPF_SMIN: /// Signed minimum
        BBLV.markConstantRange(TrueCR.smin(FalseCR));
      case SPF_UMIN: /// Unsigned minimum
        BBLV.markConstantRange(TrueCR.umin(FalseCR));
      case SPF_SMAX: /// Signed maximum
        BBLV.markConstantRange(TrueCR.smax(FalseCR));
      case SPF_UMAX: /// Unsigned maximum
        BBLV.markConstantRange(TrueCR.umax(FalseCR));

    // TODO: ABS, NABS from the SelectPatternResult

  // Can we constrain the facts about the true and false values by using the
  // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
  // TODO: We could potentially refine an overdefined true value above.
  if (auto *ICI = dyn_cast<ICmpInst>(SI->getCondition())) {
    LVILatticeVal TrueValTaken, FalseValTaken;
    if (!getValueFromFromCondition(SI->getTrueValue(), ICI,
      TrueValTaken.markOverdefined();
    if (!getValueFromFromCondition(SI->getFalseValue(), ICI,
                                   FalseValTaken, false))
      FalseValTaken.markOverdefined();

    TrueVal = intersect(TrueVal, TrueValTaken);
    FalseVal = intersect(FalseVal, FalseValTaken);

    // Handle clamp idioms such as:
    // %24 = constantrange<0, 17>
    // %39 = icmp eq i32 %24, 0
    // %40 = add i32 %24, -1
    // %siv.next = select i1 %39, i32 16, i32 %40
    // %siv.next = constantrange<0, 17> not <-1, 17>
    // In general, this can handle any clamp idiom which tests the edge
    // condition via an equality or inequality.
    ICmpInst::Predicate Pred = ICI->getPredicate();
    Value *A = ICI->getOperand(0);
    if (ConstantInt *CIBase = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
      auto addConstants = [](ConstantInt *A, ConstantInt *B) {
        assert(A->getType() == B->getType());
        return ConstantInt::get(A->getType(), A->getValue() + B->getValue());

      // See if either input is A + C2, subject to the constraint from the
      // condition that A != C when that input is used. We can assume that
      // that input doesn't include C + C2.
      ConstantInt *CIAdded;
      case ICmpInst::ICMP_EQ:
        if (match(SI->getFalseValue(), m_Add(m_Specific(A),
                                             m_ConstantInt(CIAdded)))) {
          auto ResNot = addConstants(CIBase, CIAdded);
          FalseVal = intersect(FalseVal,
                               LVILatticeVal::getNot(ResNot));
      case ICmpInst::ICMP_NE:
        if (match(SI->getTrueValue(), m_Add(m_Specific(A),
                                            m_ConstantInt(CIAdded)))) {
          auto ResNot = addConstants(CIBase, CIAdded);
          TrueVal = intersect(TrueVal,
                              LVILatticeVal::getNot(ResNot));

  // Merge the (possibly refined) arm values; the select may take either.
  LVILatticeVal Result;  // Start Undefined.
  Result.mergeIn(TrueVal, DL);
  Result.mergeIn(FalseVal, DL);
// Transfer rule for integer casts: compute the operand's range and apply
// the corresponding ConstantRange transfer (truncate/sext/zext/bitcast).
bool LazyValueInfoCache::solveBlockValueCast(LVILatticeVal &BBLV,
  if (!BBI->getOperand(0)->getType()->isSized()) {
    // Without knowing how wide the input is, we can't analyze it in any useful
    BBLV.markOverdefined();

  // Filter out casts we don't know how to reason about before attempting to
  // recurse on our operand. This can cut a long search short if we know we're
  // not going to be able to get any useful information anyways.
  switch (BBI->getOpcode()) {
  case Instruction::Trunc:
  case Instruction::SExt:
  case Instruction::ZExt:
  case Instruction::BitCast:
    // Unhandled instructions are overdefined.
    DEBUG(dbgs() << " compute BB '" << BB->getName()
                 << "' - overdefined (unknown cast).\n");
    BBLV.markOverdefined();

  // Figure out the range of the LHS. If that fails, we still apply the
  // transfer rule on the full set since we may be able to locally infer
  // interesting facts.
  if (!hasBlockValue(BBI->getOperand(0), BB))
    if (pushBlockValue(std::make_pair(BB, BBI->getOperand(0))))
      // More work to do before applying this transfer rule.

  const unsigned OperandBitWidth =
      DL.getTypeSizeInBits(BBI->getOperand(0)->getType());
  // Default to the full range at the operand's width, refined below when a
  // block value is available.
  ConstantRange LHSRange = ConstantRange(OperandBitWidth);
  if (hasBlockValue(BBI->getOperand(0), BB)) {
    LVILatticeVal LHSVal = getBlockValue(BBI->getOperand(0), BB);
    intersectAssumeBlockValueConstantRange(BBI->getOperand(0), LHSVal, BBI);
    if (LHSVal.isConstantRange())
      LHSRange = LHSVal.getConstantRange();

  const unsigned ResultBitWidth =
      cast<IntegerType>(BBI->getType())->getBitWidth();

  // NOTE: We're currently limited by the set of operations that ConstantRange
  // can evaluate symbolically. Enhancing that set will allow us to analyze
  // more definitions.
  LVILatticeVal Result;
  switch (BBI->getOpcode()) {
  case Instruction::Trunc:
    Result.markConstantRange(LHSRange.truncate(ResultBitWidth));
  case Instruction::SExt:
    Result.markConstantRange(LHSRange.signExtend(ResultBitWidth));
  case Instruction::ZExt:
    Result.markConstantRange(LHSRange.zeroExtend(ResultBitWidth));
  case Instruction::BitCast:
    Result.markConstantRange(LHSRange);
  // Should be dead if the code above is correct
  llvm_unreachable("inconsistent with above");
/// Transfer rule for binary operators with a constant integer RHS: derive the
/// lattice value of BBI in block BB by evaluating the operation over the LHS's
/// constant range with ConstantRange arithmetic.  Returns false when the LHS
/// block value was pushed and must be solved first.
1079 bool LazyValueInfoCache::solveBlockValueBinaryOp(LVILatticeVal &BBLV,
1083 assert(BBI->getOperand(0)->getType()->isSized() &&
1084 "all operands to binary operators are sized");
1086 // Filter out operators we don't know how to reason about before attempting to
1087 // recurse on our operand(s). This can cut a long search short if we know
1088 // we're not going to be able to get any useful information anyway.
1089 switch (BBI->getOpcode()) {
1090 case Instruction::Add:
1091 case Instruction::Sub:
1092 case Instruction::Mul:
1093 case Instruction::UDiv:
1094 case Instruction::Shl:
1095 case Instruction::LShr:
1096 case Instruction::And:
1097 case Instruction::Or:
1098 // continue into the code below
1101 // Unhandled instructions are overdefined.
1102 DEBUG(dbgs() << " compute BB '" << BB->getName()
1103 << "' - overdefined (unknown binary operator).\n");
1104 BBLV.markOverdefined();
1108 // Figure out the range of the LHS. If that fails, use a conservative range,
1109 // but apply the transfer rule anyways. This lets us pick up facts from
1110 // expressions like "and i32 (call i32 @foo()), 32"
1111 if (!hasBlockValue(BBI->getOperand(0), BB))
1112 if (pushBlockValue(std::make_pair(BB, BBI->getOperand(0))))
1113 // More work to do before applying this transfer rule.
1116 const unsigned OperandBitWidth =
1117 DL.getTypeSizeInBits(BBI->getOperand(0)->getType());
1118 ConstantRange LHSRange = ConstantRange(OperandBitWidth);
1119 if (hasBlockValue(BBI->getOperand(0), BB)) {
1120 LVILatticeVal LHSVal = getBlockValue(BBI->getOperand(0), BB);
1121 intersectAssumeBlockValueConstantRange(BBI->getOperand(0), LHSVal, BBI);
1122 if (LHSVal.isConstantRange())
1123 LHSRange = LHSVal.getConstantRange();
// NOTE(review): cast<> asserts if operand 1 is not a ConstantInt; presumably
// a guard elsewhere rejects non-constant RHS before we get here -- confirm.
1126 ConstantInt *RHS = cast<ConstantInt>(BBI->getOperand(1));
1127 ConstantRange RHSRange = ConstantRange(RHS->getValue());
1129 // NOTE: We're currently limited by the set of operations that ConstantRange
1130 // can evaluate symbolically. Enhancing that set will allow us to analyze
1131 // more definitions.
1132 LVILatticeVal Result;
1133 switch (BBI->getOpcode()) {
1134 case Instruction::Add:
1135 Result.markConstantRange(LHSRange.add(RHSRange));
1137 case Instruction::Sub:
1138 Result.markConstantRange(LHSRange.sub(RHSRange));
1140 case Instruction::Mul:
1141 Result.markConstantRange(LHSRange.multiply(RHSRange));
1143 case Instruction::UDiv:
1144 Result.markConstantRange(LHSRange.udiv(RHSRange));
1146 case Instruction::Shl:
1147 Result.markConstantRange(LHSRange.shl(RHSRange));
1149 case Instruction::LShr:
1150 Result.markConstantRange(LHSRange.lshr(RHSRange));
1152 case Instruction::And:
1153 Result.markConstantRange(LHSRange.binaryAnd(RHSRange));
1155 case Instruction::Or:
1156 Result.markConstantRange(LHSRange.binaryOr(RHSRange));
1159 // Should be dead if the code above is correct
1160 llvm_unreachable("inconsistent with above");
/// If the comparison ICI constrains Val on the branch destination selected by
/// isTrueDest, store that constraint (a constant, a not-constant, or a
/// constant range) in Result and return true; otherwise return false and
/// leave Result untouched.
// NOTE(review): the doubled "From" in the name looks like a typo
// (getValueFromCondition); renaming requires updating the caller in
// getEdgeValueLocal at the same time.
1168 bool getValueFromFromCondition(Value *Val, ICmpInst *ICI,
1169 LVILatticeVal &Result, bool isTrueDest) {
1170 assert(ICI && "precondition");
1171 if (isa<Constant>(ICI->getOperand(1))) {
1172 if (ICI->isEquality() && ICI->getOperand(0) == Val) {
1173 // We know that V has the RHS constant if this is a true SETEQ or
1175 if (isTrueDest == (ICI->getPredicate() == ICmpInst::ICMP_EQ))
1176 Result = LVILatticeVal::get(cast<Constant>(ICI->getOperand(1)));
1178 Result = LVILatticeVal::getNot(cast<Constant>(ICI->getOperand(1)));
1182 // Recognize the range checking idiom that InstCombine produces.
1183 // (X-C1) u< C2 --> [C1, C1+C2)
// On a match, NegOffset is bound to the add's constant; it stays null when the
// pattern (or the ULT predicate) is absent.
1184 ConstantInt *NegOffset = nullptr;
1185 if (ICI->getPredicate() == ICmpInst::ICMP_ULT)
1186 match(ICI->getOperand(0), m_Add(m_Specific(Val),
1187 m_ConstantInt(NegOffset)));
1189 ConstantInt *CI = dyn_cast<ConstantInt>(ICI->getOperand(1));
1190 if (CI && (ICI->getOperand(0) == Val || NegOffset)) {
1191 // Calculate the range of values that are allowed by the comparison
1192 ConstantRange CmpRange(CI->getValue());
1193 ConstantRange TrueValues =
1194 ConstantRange::makeAllowedICmpRegion(ICI->getPredicate(), CmpRange);
1196 if (NegOffset) // Apply the offset from above.
1197 TrueValues = TrueValues.subtract(NegOffset->getValue());
1199 // If we're interested in the false dest, invert the condition.
1200 if (!isTrueDest) TrueValues = TrueValues.inverse();
1202 Result = LVILatticeVal::getRange(std::move(TrueValues));
1210 /// \brief Compute the value of Val on the edge BBFrom -> BBTo. Returns false if
1211 /// Val is not constrained on the edge. Result is unspecified if return value
1213 static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
1214 BasicBlock *BBTo, LVILatticeVal &Result) {
1215 // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
1216 // know that v != 0.
1217 if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
1218 // If this is a conditional branch and only one successor goes to BBTo, then
1219 // we may be able to infer something from the condition.
1220 if (BI->isConditional() &&
1221 BI->getSuccessor(0) != BI->getSuccessor(1)) {
1222 bool isTrueDest = BI->getSuccessor(0) == BBTo;
1223 assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1224 "BBTo isn't a successor of BBFrom");
1226 // If V is the condition of the branch itself, then we know exactly what
1228 if (BI->getCondition() == Val) {
// Taking the true edge pins the i1 condition to true, the false edge to false.
1229 Result = LVILatticeVal::get(ConstantInt::get(
1230 Type::getInt1Ty(Val->getContext()), isTrueDest));
1234 // If the condition of the branch is an equality comparison, we may be
1235 // able to infer the value.
1236 if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition()))
1237 if (getValueFromFromCondition(Val, ICI, Result, isTrueDest))
1242 // If the edge was formed by a switch on the value, then we may know exactly
1244 if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
1245 if (SI->getCondition() != Val)
// For the default destination, start from the full set and subtract the
// explicit case values; otherwise start empty and union in the case values
// whose successor is BBTo.
1248 bool DefaultCase = SI->getDefaultDest() == BBTo;
1249 unsigned BitWidth = Val->getType()->getIntegerBitWidth();
1250 ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
1252 for (SwitchInst::CaseIt i : SI->cases()) {
1253 ConstantRange EdgeVal(i.getCaseValue()->getValue());
1255 // It is possible that the default destination is the destination of
1256 // some cases. There is no need to perform difference for those cases.
1257 if (i.getCaseSuccessor() != BBTo)
1258 EdgesVals = EdgesVals.difference(EdgeVal);
1259 } else if (i.getCaseSuccessor() == BBTo)
1260 EdgesVals = EdgesVals.unionWith(EdgeVal);
1262 Result = LVILatticeVal::getRange(std::move(EdgesVals));
1268 /// \brief Compute the value of Val on the edge BBFrom -> BBTo or the value at
1269 /// the basic block if the edge does not constrain Val.
1270 bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
1271 BasicBlock *BBTo, LVILatticeVal &Result,
1272 Instruction *CxtI) {
1273 // If already a constant, there is nothing to compute.
1274 if (Constant *VC = dyn_cast<Constant>(Val)) {
1275 Result = LVILatticeVal::get(VC);
1279 LVILatticeVal LocalResult;
1280 if (!getEdgeValueLocal(Val, BBFrom, BBTo, LocalResult))
1281 // If we couldn't constrain the value on the edge, LocalResult doesn't
1282 // provide any information.
1283 LocalResult.markOverdefined();
1285 if (hasSingleValue(LocalResult)) {
1286 // Can't get any more precise here
1287 Result = LocalResult;
1291 if (!hasBlockValue(Val, BBFrom)) {
1292 if (pushBlockValue(std::make_pair(BBFrom, Val)))
1294 // No new information.
1295 Result = LocalResult;
1299 // Try to intersect ranges of the BB and the constraint on the edge.
1300 LVILatticeVal InBlock = getBlockValue(Val, BBFrom);
1301 intersectAssumeBlockValueConstantRange(Val, InBlock, BBFrom->getTerminator());
1302 // We can use the context instruction (generically the ultimate instruction
1303 // the calling pass is trying to simplify) here, even though the result of
1304 // this function is generally cached when called from the solve* functions
1305 // (and that cached result might be used with queries using a different
1306 // context instruction), because when this function is called from the solve*
1307 // functions, the context instruction is not provided. When called from
1308 // LazyValueInfoCache::getValueOnEdge, the context instruction is provided,
1309 // but then the result is not cached.
1310 intersectAssumeBlockValueConstantRange(Val, InBlock, CxtI);
// The final answer is the meet of the edge-local constraint and the value
// known at the end of BBFrom.
1312 Result = intersect(LocalResult, InBlock);
/// Public entry: compute (solving lazily as needed) the lattice value of V at
/// the end of block BB, refined by any assumptions valid at CxtI.
1316 LVILatticeVal LazyValueInfoCache::getValueInBlock(Value *V, BasicBlock *BB,
1317 Instruction *CxtI) {
1318 DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1319 << BB->getName() << "'\n");
// The lazy-solver work list must be drained between top-level queries.
1321 assert(BlockValueStack.empty() && BlockValueSet.empty());
1322 if (!hasBlockValue(V, BB)) {
1323 pushBlockValue(std::make_pair(BB, V));
1326 LVILatticeVal Result = getBlockValue(V, BB);
1327 intersectAssumeBlockValueConstantRange(V, Result, CxtI);
1329 DEBUG(dbgs() << " Result = " << Result << "\n");
/// Compute a lattice value for V at the program point CxtI using only range
/// metadata and assumptions -- no block-level dataflow is performed.
1333 LVILatticeVal LazyValueInfoCache::getValueAt(Value *V, Instruction *CxtI) {
1334 DEBUG(dbgs() << "LVI Getting value " << *V << " at '"
1335 << CxtI->getName() << "'\n");
1337 if (auto *C = dyn_cast<Constant>(V))
1338 return LVILatticeVal::get(C);
// Start from !range metadata (if V is an instruction carrying it), then
// narrow with any applicable assumptions at CxtI.
1340 LVILatticeVal Result = LVILatticeVal::getOverdefined();
1341 if (auto *I = dyn_cast<Instruction>(V))
1342 Result = getFromRangeMetadata(I);
1343 intersectAssumeBlockValueConstantRange(V, Result, CxtI);
1345 DEBUG(dbgs() << " Result = " << Result << "\n");
/// Public entry: compute the lattice value of V along the CFG edge
/// FromBB -> ToBB, refined by any assumptions valid at CxtI.
1349 LVILatticeVal LazyValueInfoCache::
1350 getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1351 Instruction *CxtI) {
1352 DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1353 << FromBB->getName() << "' to '" << ToBB->getName() << "'\n");
1355 LVILatticeVal Result;
1356 if (!getEdgeValue(V, FromBB, ToBB, Result, CxtI)) {
// The first query pushed work for the lazy solver; once that work is done the
// re-query must complete without pushing any more.
1358 bool WasFastQuery = getEdgeValue(V, FromBB, ToBB, Result, CxtI);
1360 assert(WasFastQuery && "More work to do after problem solved?");
1363 DEBUG(dbgs() << " Result = " << Result << "\n");
/// Invalidate cached overdefined results made stale by threading the edge
/// PredBB -> OldSucc over to NewSucc, so they are lazily recomputed.
1367 void LazyValueInfoCache::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1368 BasicBlock *NewSucc) {
1369 // When an edge in the graph has been threaded, values that we could not
1370 // determine a value for before (i.e. were marked overdefined) may be
1371 // possible to solve now. We do NOT try to proactively update these values.
1372 // Instead, we clear their entries from the cache, and allow lazy updating to
1373 // recompute them when needed.
1375 // The updating process is fairly simple: we need to drop cached info
1376 // for all values that were marked overdefined in OldSucc, and for those same
1377 // values in any successor of OldSucc (except NewSucc) in which they were
1378 // also marked overdefined.
1379 std::vector<BasicBlock*> worklist;
1380 worklist.push_back(OldSucc);
1382 auto I = OverDefinedCache.find(OldSucc);
1383 if (I == OverDefinedCache.end())
1384 return; // Nothing to process here.
// Snapshot the overdefined values of OldSucc; these are the only candidates
// that can need clearing in downstream blocks.
1385 SmallVector<Value *, 4> ValsToClear(I->second.begin(), I->second.end());
1387 // Use a worklist to perform a depth-first search of OldSucc's successors.
1388 // NOTE: We do not need a visited list since any blocks we have already
1389 // visited will have had their overdefined markers cleared already, and we
1390 // thus won't loop to their successors.
1391 while (!worklist.empty()) {
1392 BasicBlock *ToUpdate = worklist.back();
1393 worklist.pop_back();
1395 // Skip blocks only accessible through NewSucc.
1396 if (ToUpdate == NewSucc) continue;
1398 bool changed = false;
1399 for (Value *V : ValsToClear) {
1400 // If a value was marked overdefined in OldSucc, and is here too...
1401 auto OI = OverDefinedCache.find(ToUpdate);
1402 if (OI == OverDefinedCache.end())
1404 SmallPtrSetImpl<Value *> &ValueSet = OI->second;
1405 if (!ValueSet.count(V))
// Drop the per-block overdefined set entirely once its last entry is gone.
1409 if (ValueSet.empty())
1410 OverDefinedCache.erase(OI);
1412 // If we removed anything, then we potentially need to update
1413 // blocks successors too.
1417 if (!changed) continue;
1419 worklist.insert(worklist.end(), succ_begin(ToUpdate), succ_end(ToUpdate));
1423 //===----------------------------------------------------------------------===//
1424 // LazyValueInfo Impl
1425 //===----------------------------------------------------------------------===//
1427 /// This lazily constructs the LazyValueInfoCache.
/// PImpl is the caller-owned opaque cache slot.  DL must be non-null the
/// first time the cache is constructed; presumably later calls may pass null
/// once PImpl is populated (releaseMemory does exactly that) -- see the
/// assert below, which guards only the construction path.
1428 static LazyValueInfoCache &getCache(void *&PImpl, AssumptionCache *AC,
1429 const DataLayout *DL,
1430 DominatorTree *DT = nullptr) {
1432 assert(DL && "getCache() called with a null DataLayout");
1433 PImpl = new LazyValueInfoCache(AC, *DL, DT);
1435 return *static_cast<LazyValueInfoCache*>(PImpl);
/// Legacy pass-manager entry point: collect the analyses LVI consults and
/// reset any cache state left over from a previous function.
1438 bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
1439 Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1440 const DataLayout &DL = F.getParent()->getDataLayout();
1442 DominatorTreeWrapperPass *DTWP =
1443 getAnalysisIfAvailable<DominatorTreeWrapperPass>();
1444 Info.DT = DTWP ? &DTWP->getDomTree() : nullptr; // DT is optional for LVI.
1445 Info.TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1448 getCache(Info.PImpl, Info.AC, &DL, Info.DT).clear();
/// Declare dependencies: LVI is a pure analysis (preserves everything) that
/// requires assumption tracking and target-library information.
1454 void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1455 AU.setPreservesAll();
1456 AU.addRequired<AssumptionCacheTracker>();
1457 AU.addRequired<TargetLibraryInfoWrapperPass>();
/// Accessor for the LazyValueInfo instance wrapped by this legacy pass.
1460 LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
// The destructor frees the lazily-constructed cache, if one was ever built.
1462 LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
/// Destroy the underlying cache (if allocated).  Passing a null DataLayout is
/// fine here: the cache already exists, so getCache only retrieves it.
1464 void LazyValueInfo::releaseMemory() {
1465 // If the cache was allocated, free it.
1467 delete &getCache(PImpl, AC, nullptr);
// Forward the legacy pass's releaseMemory to the wrapped analysis object.
1472 void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
/// New pass-manager entry point: build a LazyValueInfo from the analyses the
/// manager provides.  The dominator tree is optional (cached result only).
1474 LazyValueInfo LazyValueAnalysis::run(Function &F, FunctionAnalysisManager &FAM) {
1475 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1476 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
1477 auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
1479 return LazyValueInfo(&AC, &TLI, DT);
1483 /// Returns true if we can statically tell that this value will never be a
1484 /// "useful" constant. In practice, this means we've got something like an
1485 /// alloca or a malloc call for which a comparison against a constant can
1486 /// only be guarding dead code. Note that we are potentially giving up some
1487 /// precision in dead code (a constant result) in favour of avoiding a
1488 /// expensive search for a easily answered common query.
1489 static bool isKnownNonConstant(Value *V) {
// Look through pointer casts so e.g. a bitcast of an alloca is caught too.
1490 V = V->stripPointerCasts();
1491 // The return val of alloc cannot be a Constant.
1492 if (isa<AllocaInst>(V))
/// Determine whether V is a known constant at the end of BB (optionally
/// refined at CxtI); return it, or null if unknown.  A constant range that
/// has collapsed to a single element also counts as a constant.
1497 Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
1498 Instruction *CxtI) {
1499 // Bail out early if V is known not to be a Constant.
1500 if (isKnownNonConstant(V))
1503 const DataLayout &DL = BB->getModule()->getDataLayout();
1504 LVILatticeVal Result =
1505 getCache(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);
1507 if (Result.isConstant())
1508 return Result.getConstant();
1509 if (Result.isConstantRange()) {
1510 ConstantRange CR = Result.getConstantRange();
1511 if (const APInt *SingleVal = CR.getSingleElement())
1512 return ConstantInt::get(V->getContext(), *SingleVal);
/// Return the best known constant range for the integer value V at the end of
/// BB (optionally refined at CxtI).  An undefined lattice value maps to the
/// empty set; anything else unknown maps to the full set.
1517 ConstantRange LazyValueInfo::getConstantRange(Value *V, BasicBlock *BB,
1518 Instruction *CxtI) {
1519 assert(V->getType()->isIntegerTy());
1520 unsigned Width = V->getType()->getIntegerBitWidth();
1521 const DataLayout &DL = BB->getModule()->getDataLayout();
1522 LVILatticeVal Result =
1523 getCache(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);
// NOTE(review): presumably integer constants are always represented as
// single-element ranges by this point, which is why isConstant() cannot
// occur here -- confirm against the LVILatticeVal invariants.
1524 assert(!Result.isConstant());
1525 if (Result.isUndefined())
1526 return ConstantRange(Width, /*isFullSet=*/false);
1527 if (Result.isConstantRange())
1528 return Result.getConstantRange();
1529 return ConstantRange(Width, /*isFullSet=*/true);
1532 /// Determine whether the specified value is known to be a
1533 /// constant on the specified edge. Return null if not.
1534 Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1536 Instruction *CxtI) {
1537 const DataLayout &DL = FromBB->getModule()->getDataLayout();
1538 LVILatticeVal Result =
1539 getCache(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
1541 if (Result.isConstant())
1542 return Result.getConstant();
// As in getConstant(): a range that collapsed to one element is a constant.
1543 if (Result.isConstantRange()) {
1544 ConstantRange CR = Result.getConstantRange();
1545 if (const APInt *SingleVal = CR.getSingleElement())
1546 return ConstantInt::get(V->getContext(), *SingleVal);
/// Fold the comparison "V Pred C" given the lattice value already computed
/// for V.  Returns True/False when the lattice information decides the
/// predicate either way, or Unknown otherwise.
1551 static LazyValueInfo::Tristate getPredicateResult(unsigned Pred, Constant *C,
1552 LVILatticeVal &Result,
1553 const DataLayout &DL,
1554 TargetLibraryInfo *TLI) {
1556 // If we know the value is a constant, evaluate the conditional.
1557 Constant *Res = nullptr;
1558 if (Result.isConstant()) {
1559 Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, DL,
1561 if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
1562 return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
1563 return LazyValueInfo::Unknown;
1566 if (Result.isConstantRange()) {
// Range reasoning only applies to integer comparisons against a ConstantInt.
1567 ConstantInt *CI = dyn_cast<ConstantInt>(C);
1568 if (!CI) return LazyValueInfo::Unknown;
1570 ConstantRange CR = Result.getConstantRange();
1571 if (Pred == ICmpInst::ICMP_EQ) {
1572 if (!CR.contains(CI->getValue()))
1573 return LazyValueInfo::False;
1575 if (CR.isSingleElement() && CR.contains(CI->getValue()))
1576 return LazyValueInfo::True;
1577 } else if (Pred == ICmpInst::ICMP_NE) {
1578 if (!CR.contains(CI->getValue()))
1579 return LazyValueInfo::True;
1581 if (CR.isSingleElement() && CR.contains(CI->getValue()))
1582 return LazyValueInfo::False;
1585 // Handle more complex predicates.
// Decide by set inclusion: CR entirely inside the satisfying region means
// True; entirely inside its complement means False.
1586 ConstantRange TrueValues =
1587 ICmpInst::makeConstantRange((ICmpInst::Predicate)Pred, CI->getValue());
1588 if (TrueValues.contains(CR))
1589 return LazyValueInfo::True;
1590 if (TrueValues.inverse().contains(CR))
1591 return LazyValueInfo::False;
1592 return LazyValueInfo::Unknown;
1595 if (Result.isNotConstant()) {
1596 // If this is an equality comparison, we can try to fold it knowing that
1598 if (Pred == ICmpInst::ICMP_EQ) {
1599 // !C1 == C -> false iff C1 == C.
1600 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1601 Result.getNotConstant(), C, DL,
// NOTE(review): Res is dereferenced without a null check; this assumes the
// constant fold always succeeds for these operands -- confirm.
1603 if (Res->isNullValue())
1604 return LazyValueInfo::False;
1605 } else if (Pred == ICmpInst::ICMP_NE) {
1606 // !C1 != C -> true iff C1 == C.
1607 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1608 Result.getNotConstant(), C, DL,
1610 if (Res->isNullValue())
1611 return LazyValueInfo::True;
1613 return LazyValueInfo::Unknown;
1616 return LazyValueInfo::Unknown;
1619 /// Determine whether the specified value comparison with a constant is known to
1620 /// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
1621 LazyValueInfo::Tristate
1622 LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
1623 BasicBlock *FromBB, BasicBlock *ToBB,
1624 Instruction *CxtI) {
1625 const DataLayout &DL = FromBB->getModule()->getDataLayout();
// Compute the edge-constrained lattice value for V, then let the shared
// helper fold the predicate against it.
1626 LVILatticeVal Result =
1627 getCache(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
1629 return getPredicateResult(Pred, C, Result, DL, TLI);
/// Determine whether "V Pred C" is known true or false at CxtI.  Tries the
/// merged lattice value first, then falls back to proving the predicate
/// separately along each incoming edge (one step back through a phi or the
/// block's predecessors).
1632 LazyValueInfo::Tristate
1633 LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
1634 Instruction *CxtI) {
1635 // Is or is not NonNull are common predicates being queried. If
1636 // isKnownNonNull can tell us the result of the predicate, we can
1637 // return it quickly. But this is only a fastpath, and falling
1638 // through would still be correct.
1639 if (V->getType()->isPointerTy() && C->isNullValue() &&
1640 isKnownNonNull(V->stripPointerCasts())) {
1641 if (Pred == ICmpInst::ICMP_EQ)
1642 return LazyValueInfo::False;
1643 else if (Pred == ICmpInst::ICMP_NE)
1644 return LazyValueInfo::True;
1646 const DataLayout &DL = CxtI->getModule()->getDataLayout();
1647 LVILatticeVal Result = getCache(PImpl, AC, &DL, DT).getValueAt(V, CxtI);
1648 Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
1652 // Note: The following bit of code is somewhat distinct from the rest of LVI;
1653 // LVI as a whole tries to compute a lattice value which is conservatively
1654 // correct at a given location. In this case, we have a predicate which we
1655 // weren't able to prove about the merged result, and we're pushing that
1656 // predicate back along each incoming edge to see if we can prove it
1657 // separately for each input. As a motivating example, consider:
1659 // %v1 = ... ; constantrange<1, 5>
1662 // %v2 = ... ; constantrange<10, 20>
1665 // %phi = phi [%v1, %v2] ; constantrange<1,20>
1666 // %pred = icmp eq i32 %phi, 8
1667 // We can't tell from the lattice value for '%phi' that '%pred' is false
1668 // along each path, but by checking the predicate over each input separately,
1670 // We limit the search to one step backwards from the current BB and value.
1671 // We could consider extending this to search further backwards through the
1672 // CFG and/or value graph, but there are non-obvious compile time vs quality
1675 BasicBlock *BB = CxtI->getParent();
1677 // Function entry or an unreachable block. Bail to avoid confusing
1679 pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
1683 // If V is a PHI node in the same block as the context, we need to ask
1684 // questions about the predicate as applied to the incoming value along
1685 // each edge. This is useful for eliminating cases where the predicate is
1686 // known along all incoming edges.
1687 if (auto *PHI = dyn_cast<PHINode>(V))
1688 if (PHI->getParent() == BB) {
1689 Tristate Baseline = Unknown;
1690 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
1691 Value *Incoming = PHI->getIncomingValue(i);
1692 BasicBlock *PredBB = PHI->getIncomingBlock(i);
1693 // Note that PredBB may be BB itself.
1694 Tristate Result = getPredicateOnEdge(Pred, Incoming, C, PredBB, BB,
1697 // Keep going as long as we've seen a consistent known result for
// The fold: first edge seeds Baseline; any disagreement collapses to Unknown.
1699 Baseline = (i == 0) ? Result /* First iteration */
1700 : (Baseline == Result ? Baseline : Unknown); /* All others */
1701 if (Baseline == Unknown)
1704 if (Baseline != Unknown)
1708 // For a comparison where the V is outside this block, it's possible
1709 // that we've branched on it before. Look to see if the value is known
1710 // on all incoming edges.
1711 if (!isa<Instruction>(V) ||
1712 cast<Instruction>(V)->getParent() != BB) {
1713 // For predecessor edge, determine if the comparison is true or false
1714 // on that edge. If they're all true or all false, we can conclude
1715 // the value of the comparison in this block.
1716 Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1717 if (Baseline != Unknown) {
1718 // Check that all remaining incoming values match the first one.
1719 while (++PI != PE) {
1720 Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1721 if (Ret != Baseline) break;
1723 // If we terminated early, then one of the values didn't match.
/// Forward a CFG edge-threading notification to the cache so stale
/// overdefined results reachable from OldSucc get invalidated.
1733 void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1734 BasicBlock *NewSucc) {
1736 const DataLayout &DL = PredBB->getModule()->getDataLayout();
1737 getCache(PImpl, AC, &DL, DT).threadEdge(PredBB, OldSucc, NewSucc);
1741 void LazyValueInfo::eraseBlock(BasicBlock *BB) {
1743 const DataLayout &DL = BB->getModule()->getDataLayout();
1744 getCache(PImpl, AC, &DL, DT).eraseBlock(BB);