//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for lazy computation of value constraint
// information.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
40 using namespace PatternMatch;
42 #define DEBUG_TYPE "lazy-value-info"
44 // This is the number of worklist items we will process to try to discover an
45 // answer for a given value.
46 static const unsigned MaxProcessedPerValue = 500;
48 char LazyValueInfoWrapperPass::ID = 0;
49 INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
50 "Lazy Value Information Analysis", false, true)
51 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
52 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
53 INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
54 "Lazy Value Information Analysis", false, true)
57 FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); }
60 AnalysisKey LazyValueAnalysis::Key;
62 //===----------------------------------------------------------------------===//
64 //===----------------------------------------------------------------------===//
66 /// This is the information tracked by LazyValueInfo for each value.
68 /// FIXME: This is basically just for bringup, this can be made a lot more rich
74 /// This Value has no known value yet. As a result, this implies the
75 /// producing instruction is dead. Caution: We use this as the starting
76 /// state in our local meet rules. In this usage, it's taken to mean
77 /// "nothing known yet".
80 /// This Value has a specific constant value. (For constant integers,
81 /// constantrange is used instead. Integer typed constantexprs can appear
85 /// This Value is known to not have the specified value. (For constant
86 /// integers, constantrange is used instead. As above, integer typed
87 /// constantexprs can appear here.)
90 /// The Value falls within this range. (Used only for integer typed values.)
93 /// We can not precisely model the dynamic values this value might take.
97 /// Val: This stores the current lattice value along with the Constant* for
98 /// the constant if this is a 'constant' or 'notconstant' value.
104 LVILatticeVal() : Tag(undefined), Val(nullptr), Range(1, true) {}
106 static LVILatticeVal get(Constant *C) {
108 if (!isa<UndefValue>(C))
112 static LVILatticeVal getNot(Constant *C) {
114 if (!isa<UndefValue>(C))
115 Res.markNotConstant(C);
118 static LVILatticeVal getRange(ConstantRange CR) {
120 Res.markConstantRange(std::move(CR));
123 static LVILatticeVal getOverdefined() {
125 Res.markOverdefined();
129 bool isUndefined() const { return Tag == undefined; }
130 bool isConstant() const { return Tag == constant; }
131 bool isNotConstant() const { return Tag == notconstant; }
132 bool isConstantRange() const { return Tag == constantrange; }
133 bool isOverdefined() const { return Tag == overdefined; }
135 Constant *getConstant() const {
136 assert(isConstant() && "Cannot get the constant of a non-constant!");
140 Constant *getNotConstant() const {
141 assert(isNotConstant() && "Cannot get the constant of a non-notconstant!");
145 const ConstantRange &getConstantRange() const {
146 assert(isConstantRange() &&
147 "Cannot get the constant-range of a non-constant-range!");
152 void markOverdefined() {
158 void markConstant(Constant *V) {
159 assert(V && "Marking constant with NULL");
160 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
161 markConstantRange(ConstantRange(CI->getValue()));
164 if (isa<UndefValue>(V))
167 assert((!isConstant() || getConstant() == V) &&
168 "Marking constant with different value");
169 assert(isUndefined());
174 void markNotConstant(Constant *V) {
175 assert(V && "Marking constant with NULL");
176 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
177 markConstantRange(ConstantRange(CI->getValue()+1, CI->getValue()));
180 if (isa<UndefValue>(V))
183 assert((!isConstant() || getConstant() != V) &&
184 "Marking constant !constant with same value");
185 assert((!isNotConstant() || getNotConstant() == V) &&
186 "Marking !constant with different value");
187 assert(isUndefined() || isConstant());
192 void markConstantRange(ConstantRange NewR) {
193 if (isConstantRange()) {
194 if (NewR.isEmptySet())
197 Range = std::move(NewR);
202 assert(isUndefined());
203 if (NewR.isEmptySet())
207 Range = std::move(NewR);
213 /// Merge the specified lattice value into this one, updating this
214 /// one and returning true if anything changed.
215 void mergeIn(const LVILatticeVal &RHS, const DataLayout &DL) {
216 if (RHS.isUndefined() || isOverdefined())
218 if (RHS.isOverdefined()) {
229 if (RHS.isConstant() && Val == RHS.Val)
235 if (isNotConstant()) {
236 if (RHS.isNotConstant() && Val == RHS.Val)
242 assert(isConstantRange() && "New LVILattice type?");
243 if (!RHS.isConstantRange()) {
244 // We can get here if we've encountered a constantexpr of integer type
245 // and merge it with a constantrange.
249 ConstantRange NewR = Range.unionWith(RHS.getConstantRange());
250 if (NewR.isFullSet())
253 markConstantRange(std::move(NewR));
257 } // end anonymous namespace.
260 raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val)
262 raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val) {
263 if (Val.isUndefined())
264 return OS << "undefined";
265 if (Val.isOverdefined())
266 return OS << "overdefined";
268 if (Val.isNotConstant())
269 return OS << "notconstant<" << *Val.getNotConstant() << '>';
270 if (Val.isConstantRange())
271 return OS << "constantrange<" << Val.getConstantRange().getLower() << ", "
272 << Val.getConstantRange().getUpper() << '>';
273 return OS << "constant<" << *Val.getConstant() << '>';
277 /// Returns true if this lattice value represents at most one possible value.
278 /// This is as precise as any lattice value can get while still representing
280 static bool hasSingleValue(const LVILatticeVal &Val) {
281 if (Val.isConstantRange() &&
282 Val.getConstantRange().isSingleElement())
283 // Integer constants are single element ranges
285 if (Val.isConstant())
286 // Non integer constants
291 /// Combine two sets of facts about the same value into a single set of
292 /// facts. Note that this method is not suitable for merging facts along
293 /// different paths in a CFG; that's what the mergeIn function is for. This
294 /// is for merging facts gathered about the same value at the same location
295 /// through two independent means.
297 /// * This method does not promise to return the most precise possible lattice
298 /// value implied by A and B. It is allowed to return any lattice element
299 /// which is at least as strong as *either* A or B (unless our facts
300 /// conflict, see below).
301 /// * Due to unreachable code, the intersection of two lattice values could be
302 /// contradictory. If this happens, we return some valid lattice value so as
303 /// not confuse the rest of LVI. Ideally, we'd always return Undefined, but
304 /// we do not make this guarantee. TODO: This would be a useful enhancement.
305 static LVILatticeVal intersect(const LVILatticeVal &A, const LVILatticeVal &B) {
306 // Undefined is the strongest state. It means the value is known to be along
307 // an unreachable path.
313 // If we gave up for one, but got a useable fact from the other, use it.
314 if (A.isOverdefined())
316 if (B.isOverdefined())
319 // Can't get any more precise than constants.
320 if (hasSingleValue(A))
322 if (hasSingleValue(B))
325 // Could be either constant range or not constant here.
326 if (!A.isConstantRange() || !B.isConstantRange()) {
327 // TODO: Arbitrary choice, could be improved
331 // Intersect two constant ranges
332 ConstantRange Range =
333 A.getConstantRange().intersectWith(B.getConstantRange());
334 // Note: An empty range is implicitly converted to overdefined internally.
335 // TODO: We could instead use Undefined here since we've proven a conflict
336 // and thus know this path must be unreachable.
337 return LVILatticeVal::getRange(std::move(Range));
//===----------------------------------------------------------------------===//
//                          LazyValueInfoCache Decl
//===----------------------------------------------------------------------===//
345 /// A callback value handle updates the cache when values are erased.
346 class LazyValueInfoCache;
347 struct LVIValueHandle final : public CallbackVH {
348 // Needs to access getValPtr(), which is protected.
349 friend struct DenseMapInfo<LVIValueHandle>;
351 LazyValueInfoCache *Parent;
353 LVIValueHandle(Value *V, LazyValueInfoCache *P)
354 : CallbackVH(V), Parent(P) { }
356 void deleted() override;
357 void allUsesReplacedWith(Value *V) override {
361 } // end anonymous namespace
364 /// This is the cache kept by LazyValueInfo which
365 /// maintains information about queries across the clients' queries.
366 class LazyValueInfoCache {
367 /// This is all of the cached block information for exactly one Value*.
368 /// The entries are sorted by the BasicBlock* of the
369 /// entries, allowing us to do a lookup with a binary search.
370 /// Over-defined lattice values are recorded in OverDefinedCache to reduce
372 struct ValueCacheEntryTy {
373 ValueCacheEntryTy(Value *V, LazyValueInfoCache *P) : Handle(V, P) {}
374 LVIValueHandle Handle;
375 SmallDenseMap<PoisoningVH<BasicBlock>, LVILatticeVal, 4> BlockVals;
378 /// This tracks, on a per-block basis, the set of values that are
379 /// over-defined at the end of that block.
380 typedef DenseMap<PoisoningVH<BasicBlock>, SmallPtrSet<Value *, 4>>
382 /// Keep track of all blocks that we have ever seen, so we
383 /// don't spend time removing unused blocks from our caches.
384 DenseSet<PoisoningVH<BasicBlock> > SeenBlocks;
386 /// This is all of the cached information for all values,
387 /// mapped from Value* to key information.
388 DenseMap<Value *, std::unique_ptr<ValueCacheEntryTy>> ValueCache;
389 OverDefinedCacheTy OverDefinedCache;
393 void insertResult(Value *Val, BasicBlock *BB, const LVILatticeVal &Result) {
394 SeenBlocks.insert(BB);
396 // Insert over-defined values into their own cache to reduce memory
398 if (Result.isOverdefined())
399 OverDefinedCache[BB].insert(Val);
401 auto It = ValueCache.find_as(Val);
402 if (It == ValueCache.end()) {
403 ValueCache[Val] = make_unique<ValueCacheEntryTy>(Val, this);
404 It = ValueCache.find_as(Val);
405 assert(It != ValueCache.end() && "Val was just added to the map!");
407 It->second->BlockVals[BB] = Result;
411 bool isOverdefined(Value *V, BasicBlock *BB) const {
412 auto ODI = OverDefinedCache.find(BB);
414 if (ODI == OverDefinedCache.end())
417 return ODI->second.count(V);
420 bool hasCachedValueInfo(Value *V, BasicBlock *BB) const {
421 if (isOverdefined(V, BB))
424 auto I = ValueCache.find_as(V);
425 if (I == ValueCache.end())
428 return I->second->BlockVals.count(BB);
431 LVILatticeVal getCachedValueInfo(Value *V, BasicBlock *BB) const {
432 if (isOverdefined(V, BB))
433 return LVILatticeVal::getOverdefined();
435 auto I = ValueCache.find_as(V);
436 if (I == ValueCache.end())
437 return LVILatticeVal();
438 auto BBI = I->second->BlockVals.find(BB);
439 if (BBI == I->second->BlockVals.end())
440 return LVILatticeVal();
444 /// clear - Empty the cache.
448 OverDefinedCache.clear();
451 /// Inform the cache that a given value has been deleted.
452 void eraseValue(Value *V);
454 /// This is part of the update interface to inform the cache
455 /// that a block has been deleted.
456 void eraseBlock(BasicBlock *BB);
458 /// Updates the cache to remove any influence an overdefined value in
459 /// OldSucc might have (unless also overdefined in NewSucc). This just
460 /// flushes elements from the cache and does not add any.
461 void threadEdgeImpl(BasicBlock *OldSucc,BasicBlock *NewSucc);
463 friend struct LVIValueHandle;
467 void LazyValueInfoCache::eraseValue(Value *V) {
468 for (auto I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E;) {
469 // Copy and increment the iterator immediately so we can erase behind
472 SmallPtrSetImpl<Value *> &ValueSet = Iter->second;
474 if (ValueSet.empty())
475 OverDefinedCache.erase(Iter);
481 void LVIValueHandle::deleted() {
482 // This erasure deallocates *this, so it MUST happen after we're done
483 // using any and all members of *this.
484 Parent->eraseValue(*this);
487 void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
488 // Shortcut if we have never seen this block.
489 DenseSet<PoisoningVH<BasicBlock> >::iterator I = SeenBlocks.find(BB);
490 if (I == SeenBlocks.end())
494 auto ODI = OverDefinedCache.find(BB);
495 if (ODI != OverDefinedCache.end())
496 OverDefinedCache.erase(ODI);
498 for (auto &I : ValueCache)
499 I.second->BlockVals.erase(BB);
502 void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
503 BasicBlock *NewSucc) {
504 // When an edge in the graph has been threaded, values that we could not
505 // determine a value for before (i.e. were marked overdefined) may be
506 // possible to solve now. We do NOT try to proactively update these values.
507 // Instead, we clear their entries from the cache, and allow lazy updating to
508 // recompute them when needed.
510 // The updating process is fairly simple: we need to drop cached info
511 // for all values that were marked overdefined in OldSucc, and for those same
512 // values in any successor of OldSucc (except NewSucc) in which they were
513 // also marked overdefined.
514 std::vector<BasicBlock*> worklist;
515 worklist.push_back(OldSucc);
517 auto I = OverDefinedCache.find(OldSucc);
518 if (I == OverDefinedCache.end())
519 return; // Nothing to process here.
520 SmallVector<Value *, 4> ValsToClear(I->second.begin(), I->second.end());
522 // Use a worklist to perform a depth-first search of OldSucc's successors.
523 // NOTE: We do not need a visited list since any blocks we have already
524 // visited will have had their overdefined markers cleared already, and we
525 // thus won't loop to their successors.
526 while (!worklist.empty()) {
527 BasicBlock *ToUpdate = worklist.back();
530 // Skip blocks only accessible through NewSucc.
531 if (ToUpdate == NewSucc) continue;
533 // If a value was marked overdefined in OldSucc, and is here too...
534 auto OI = OverDefinedCache.find(ToUpdate);
535 if (OI == OverDefinedCache.end())
537 SmallPtrSetImpl<Value *> &ValueSet = OI->second;
539 bool changed = false;
540 for (Value *V : ValsToClear) {
541 if (!ValueSet.erase(V))
544 // If we removed anything, then we potentially need to update
545 // blocks successors too.
548 if (ValueSet.empty()) {
549 OverDefinedCache.erase(OI);
554 if (!changed) continue;
556 worklist.insert(worklist.end(), succ_begin(ToUpdate), succ_end(ToUpdate));
562 /// An assembly annotator class to print LazyValueCache information in
564 class LazyValueInfoImpl;
565 class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
566 LazyValueInfoImpl *LVIImpl;
567 // While analyzing which blocks we can solve values for, we need the dominator
568 // information. Since this is an optional parameter in LVI, we require this
569 // DomTreeAnalysis pass in the printer pass, and pass the dominator
570 // tree to the LazyValueInfoAnnotatedWriter.
574 LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
575 : LVIImpl(L), DT(DTree) {}
577 virtual void emitBasicBlockStartAnnot(const BasicBlock *BB,
578 formatted_raw_ostream &OS);
580 virtual void emitInstructionAnnot(const Instruction *I,
581 formatted_raw_ostream &OS);
585 // The actual implementation of the lazy analysis and update. Note that the
586 // inheritance from LazyValueInfoCache is intended to be temporary while
587 // splitting the code and then transitioning to a has-a relationship.
588 class LazyValueInfoImpl {
590 /// Cached results from previous queries
591 LazyValueInfoCache TheCache;
593 /// This stack holds the state of the value solver during a query.
594 /// It basically emulates the callstack of the naive
595 /// recursive value lookup process.
596 SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack;
598 /// Keeps track of which block-value pairs are in BlockValueStack.
599 DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;
601 /// Push BV onto BlockValueStack unless it's already in there.
602 /// Returns true on success.
603 bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
604 if (!BlockValueSet.insert(BV).second)
605 return false; // It's already in the stack.
607 DEBUG(dbgs() << "PUSH: " << *BV.second << " in " << BV.first->getName()
609 BlockValueStack.push_back(BV);
613 AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
614 const DataLayout &DL; ///< A mandatory DataLayout
615 DominatorTree *DT; ///< An optional DT pointer.
617 LVILatticeVal getBlockValue(Value *Val, BasicBlock *BB);
618 bool getEdgeValue(Value *V, BasicBlock *F, BasicBlock *T,
619 LVILatticeVal &Result, Instruction *CxtI = nullptr);
620 bool hasBlockValue(Value *Val, BasicBlock *BB);
622 // These methods process one work item and may add more. A false value
623 // returned means that the work item was not completely processed and must
624 // be revisited after going through the new items.
625 bool solveBlockValue(Value *Val, BasicBlock *BB);
626 bool solveBlockValueImpl(LVILatticeVal &Res, Value *Val, BasicBlock *BB);
627 bool solveBlockValueNonLocal(LVILatticeVal &BBLV, Value *Val, BasicBlock *BB);
628 bool solveBlockValuePHINode(LVILatticeVal &BBLV, PHINode *PN, BasicBlock *BB);
629 bool solveBlockValueSelect(LVILatticeVal &BBLV, SelectInst *S,
631 bool solveBlockValueBinaryOp(LVILatticeVal &BBLV, BinaryOperator *BBI,
633 bool solveBlockValueCast(LVILatticeVal &BBLV, CastInst *CI,
635 void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
642 /// This is the query interface to determine the lattice
643 /// value for the specified Value* at the end of the specified block.
644 LVILatticeVal getValueInBlock(Value *V, BasicBlock *BB,
645 Instruction *CxtI = nullptr);
647 /// This is the query interface to determine the lattice
648 /// value for the specified Value* at the specified instruction (generally
649 /// from an assume intrinsic).
650 LVILatticeVal getValueAt(Value *V, Instruction *CxtI);
652 /// This is the query interface to determine the lattice
653 /// value for the specified Value* that is true on the specified edge.
654 LVILatticeVal getValueOnEdge(Value *V, BasicBlock *FromBB,BasicBlock *ToBB,
655 Instruction *CxtI = nullptr);
657 /// Complete flush all previously computed values
662 /// Printing the LazyValueInfo Analysis.
663 void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
664 LazyValueInfoAnnotatedWriter Writer(this, DTree);
665 F.print(OS, &Writer);
668 /// This is part of the update interface to inform the cache
669 /// that a block has been deleted.
670 void eraseBlock(BasicBlock *BB) {
671 TheCache.eraseBlock(BB);
674 /// This is the update interface to inform the cache that an edge from
675 /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
676 void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
678 LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
679 DominatorTree *DT = nullptr)
680 : AC(AC), DL(DL), DT(DT) {}
682 } // end anonymous namespace
685 void LazyValueInfoImpl::solve() {
686 SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
687 BlockValueStack.begin(), BlockValueStack.end());
689 unsigned processedCount = 0;
690 while (!BlockValueStack.empty()) {
692 // Abort if we have to process too many values to get a result for this one.
693 // Because of the design of the overdefined cache currently being per-block
694 // to avoid naming-related issues (IE it wants to try to give different
695 // results for the same name in different blocks), overdefined results don't
696 // get cached globally, which in turn means we will often try to rediscover
697 // the same overdefined result again and again. Once something like
698 // PredicateInfo is used in LVI or CVP, we should be able to make the
699 // overdefined cache global, and remove this throttle.
700 if (processedCount > MaxProcessedPerValue) {
701 DEBUG(dbgs() << "Giving up on stack because we are getting too deep\n");
702 // Fill in the original values
703 while (!StartingStack.empty()) {
704 std::pair<BasicBlock *, Value *> &e = StartingStack.back();
705 TheCache.insertResult(e.second, e.first,
706 LVILatticeVal::getOverdefined());
707 StartingStack.pop_back();
709 BlockValueSet.clear();
710 BlockValueStack.clear();
713 std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
714 assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
716 if (solveBlockValue(e.second, e.first)) {
717 // The work item was completely processed.
718 assert(BlockValueStack.back() == e && "Nothing should have been pushed!");
719 assert(TheCache.hasCachedValueInfo(e.second, e.first) &&
720 "Result should be in cache!");
722 DEBUG(dbgs() << "POP " << *e.second << " in " << e.first->getName()
723 << " = " << TheCache.getCachedValueInfo(e.second, e.first) << "\n");
725 BlockValueStack.pop_back();
726 BlockValueSet.erase(e);
728 // More work needs to be done before revisiting.
729 assert(BlockValueStack.back() != e && "Stack should have been pushed!");
734 bool LazyValueInfoImpl::hasBlockValue(Value *Val, BasicBlock *BB) {
735 // If already a constant, there is nothing to compute.
736 if (isa<Constant>(Val))
739 return TheCache.hasCachedValueInfo(Val, BB);
742 LVILatticeVal LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB) {
743 // If already a constant, there is nothing to compute.
744 if (Constant *VC = dyn_cast<Constant>(Val))
745 return LVILatticeVal::get(VC);
747 return TheCache.getCachedValueInfo(Val, BB);
750 static LVILatticeVal getFromRangeMetadata(Instruction *BBI) {
751 switch (BBI->getOpcode()) {
753 case Instruction::Load:
754 case Instruction::Call:
755 case Instruction::Invoke:
756 if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
757 if (isa<IntegerType>(BBI->getType())) {
758 return LVILatticeVal::getRange(getConstantRangeFromMetadata(*Ranges));
762 // Nothing known - will be intersected with other facts
763 return LVILatticeVal::getOverdefined();
766 bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
767 if (isa<Constant>(Val))
770 if (TheCache.hasCachedValueInfo(Val, BB)) {
771 // If we have a cached value, use that.
772 DEBUG(dbgs() << " reuse BB '" << BB->getName()
773 << "' val=" << TheCache.getCachedValueInfo(Val, BB) << '\n');
775 // Since we're reusing a cached value, we don't need to update the
776 // OverDefinedCache. The cache will have been properly updated whenever the
777 // cached value was inserted.
781 // Hold off inserting this value into the Cache in case we have to return
782 // false and come back later.
784 if (!solveBlockValueImpl(Res, Val, BB))
785 // Work pushed, will revisit
788 TheCache.insertResult(Val, BB, Res);
792 bool LazyValueInfoImpl::solveBlockValueImpl(LVILatticeVal &Res,
793 Value *Val, BasicBlock *BB) {
795 Instruction *BBI = dyn_cast<Instruction>(Val);
796 if (!BBI || BBI->getParent() != BB)
797 return solveBlockValueNonLocal(Res, Val, BB);
799 if (PHINode *PN = dyn_cast<PHINode>(BBI))
800 return solveBlockValuePHINode(Res, PN, BB);
802 if (auto *SI = dyn_cast<SelectInst>(BBI))
803 return solveBlockValueSelect(Res, SI, BB);
805 // If this value is a nonnull pointer, record it's range and bailout. Note
806 // that for all other pointer typed values, we terminate the search at the
807 // definition. We could easily extend this to look through geps, bitcasts,
808 // and the like to prove non-nullness, but it's not clear that's worth it
809 // compile time wise. The context-insensitive value walk done inside
810 // isKnownNonNull gets most of the profitable cases at much less expense.
811 // This does mean that we have a sensativity to where the defining
812 // instruction is placed, even if it could legally be hoisted much higher.
813 // That is unfortunate.
814 PointerType *PT = dyn_cast<PointerType>(BBI->getType());
815 if (PT && isKnownNonNull(BBI)) {
816 Res = LVILatticeVal::getNot(ConstantPointerNull::get(PT));
819 if (BBI->getType()->isIntegerTy()) {
820 if (auto *CI = dyn_cast<CastInst>(BBI))
821 return solveBlockValueCast(Res, CI, BB);
823 BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
824 if (BO && isa<ConstantInt>(BO->getOperand(1)))
825 return solveBlockValueBinaryOp(Res, BO, BB);
828 DEBUG(dbgs() << " compute BB '" << BB->getName()
829 << "' - unknown inst def found.\n");
830 Res = getFromRangeMetadata(BBI);
834 static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
835 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
836 return L->getPointerAddressSpace() == 0 &&
837 GetUnderlyingObject(L->getPointerOperand(),
838 L->getModule()->getDataLayout()) == Ptr;
840 if (StoreInst *S = dyn_cast<StoreInst>(I)) {
841 return S->getPointerAddressSpace() == 0 &&
842 GetUnderlyingObject(S->getPointerOperand(),
843 S->getModule()->getDataLayout()) == Ptr;
845 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
846 if (MI->isVolatile()) return false;
848 // FIXME: check whether it has a valuerange that excludes zero?
849 ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
850 if (!Len || Len->isZero()) return false;
852 if (MI->getDestAddressSpace() == 0)
853 if (GetUnderlyingObject(MI->getRawDest(),
854 MI->getModule()->getDataLayout()) == Ptr)
856 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
857 if (MTI->getSourceAddressSpace() == 0)
858 if (GetUnderlyingObject(MTI->getRawSource(),
859 MTI->getModule()->getDataLayout()) == Ptr)
865 /// Return true if the allocation associated with Val is ever dereferenced
866 /// within the given basic block. This establishes the fact Val is not null,
867 /// but does not imply that the memory at Val is dereferenceable. (Val may
868 /// point off the end of the dereferenceable part of the object.)
869 static bool isObjectDereferencedInBlock(Value *Val, BasicBlock *BB) {
870 assert(Val->getType()->isPointerTy());
872 const DataLayout &DL = BB->getModule()->getDataLayout();
873 Value *UnderlyingVal = GetUnderlyingObject(Val, DL);
874 // If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
875 // inside InstructionDereferencesPointer either.
876 if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, DL, 1))
877 for (Instruction &I : *BB)
878 if (InstructionDereferencesPointer(&I, UnderlyingVal))
883 bool LazyValueInfoImpl::solveBlockValueNonLocal(LVILatticeVal &BBLV,
884 Value *Val, BasicBlock *BB) {
885 LVILatticeVal Result; // Start Undefined.
887 // If this is the entry block, we must be asking about an argument. The
888 // value is overdefined.
889 if (BB == &BB->getParent()->getEntryBlock()) {
890 assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
891 // Before giving up, see if we can prove the pointer non-null local to
892 // this particular block.
893 if (Val->getType()->isPointerTy() &&
894 (isKnownNonNull(Val) || isObjectDereferencedInBlock(Val, BB))) {
895 PointerType *PTy = cast<PointerType>(Val->getType());
896 Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
898 Result = LVILatticeVal::getOverdefined();
904 // Loop over all of our predecessors, merging what we know from them into
905 // result. If we encounter an unexplored predecessor, we eagerly explore it
906 // in a depth first manner. In practice, this has the effect of discovering
907 // paths we can't analyze eagerly without spending compile times analyzing
908 // other paths. This heuristic benefits from the fact that predecessors are
909 // frequently arranged such that dominating ones come first and we quickly
910 // find a path to function entry. TODO: We should consider explicitly
911 // canonicalizing to make this true rather than relying on this happy
913 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
914 LVILatticeVal EdgeResult;
915 if (!getEdgeValue(Val, *PI, BB, EdgeResult))
916 // Explore that input, then return here
919 Result.mergeIn(EdgeResult, DL);
921 // If we hit overdefined, exit early. The BlockVals entry is already set
923 if (Result.isOverdefined()) {
924 DEBUG(dbgs() << " compute BB '" << BB->getName()
925 << "' - overdefined because of pred (non local).\n");
926 // Before giving up, see if we can prove the pointer non-null local to
927 // this particular block.
928 if (Val->getType()->isPointerTy() &&
929 isObjectDereferencedInBlock(Val, BB)) {
930 PointerType *PTy = cast<PointerType>(Val->getType());
931 Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
939 // Return the merged value, which is more precise than 'overdefined'.
940 assert(!Result.isOverdefined());
945 bool LazyValueInfoImpl::solveBlockValuePHINode(LVILatticeVal &BBLV,
946 PHINode *PN, BasicBlock *BB) {
947 LVILatticeVal Result; // Start Undefined.
949 // Loop over all of our predecessors, merging what we know from them into
950 // result. See the comment about the chosen traversal order in
951 // solveBlockValueNonLocal; the same reasoning applies here.
952 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
953 BasicBlock *PhiBB = PN->getIncomingBlock(i);
954 Value *PhiVal = PN->getIncomingValue(i);
955 LVILatticeVal EdgeResult;
956 // Note that we can provide PN as the context value to getEdgeValue, even
957 // though the results will be cached, because PN is the value being used as
958 // the cache key in the caller.
959 if (!getEdgeValue(PhiVal, PhiBB, BB, EdgeResult, PN))
960 // Explore that input, then return here
963 Result.mergeIn(EdgeResult, DL);
965 // If we hit overdefined, exit early. The BlockVals entry is already set
967 if (Result.isOverdefined()) {
968 DEBUG(dbgs() << " compute BB '" << BB->getName()
969 << "' - overdefined because of pred (local).\n");
976 // Return the merged value, which is more precise than 'overdefined'.
977 assert(!Result.isOverdefined() && "Possible PHI in entry block?");
982 static LVILatticeVal getValueFromCondition(Value *Val, Value *Cond,
983 bool isTrueDest = true);
985 // If we can determine a constraint on the value given conditions assumed by
986 // the program, intersect those constraints with BBLV
// \param Val  The value whose lattice value is being refined.
// \param BBLV The lattice value to refine in place.
// \param BBI  Context instruction; defaults to Val itself when null.
987 void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
988 Value *Val, LVILatticeVal &BBLV, Instruction *BBI) {
// Fall back to using Val as its own context when no instruction was given.
989 BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
// Fold in facts from @llvm.assume calls registered for Val in the
// AssumptionCache, but only those valid at the context instruction.
993 for (auto &AssumeVH : AC->assumptionsFor(Val)) {
996 auto *I = cast<CallInst>(AssumeVH);
997 if (!isValidAssumeForContext(I, BBI, DT))
1000 BBLV = intersect(BBLV, getValueFromCondition(Val, I->getArgOperand(0)));
1003 // If guards are not used in the module, don't spend time looking for them
1004 auto *GuardDecl = BBI->getModule()->getFunction(
1005 Intrinsic::getName(Intrinsic::experimental_guard));
1006 if (!GuardDecl || GuardDecl->use_empty())
// Scan backwards from BBI to the start of its block; any guard seen must
// have had a true condition for execution to reach BBI.
1009 for (Instruction &I : make_range(BBI->getIterator().getReverse(),
1010 BBI->getParent()->rend())) {
1011 Value *Cond = nullptr;
1012 if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
1013 BBLV = intersect(BBLV, getValueFromCondition(Val, Cond));
// Compute the lattice value for select instruction SI at the end of BB into
// BBLV. Returns false (via the elided paths) when operand values still need
// to be solved first. Combines: min/max pattern ranges, condition-implied
// constraints on each arm, clamp idioms, and finally a merge of both arms.
1017 bool LazyValueInfoImpl::solveBlockValueSelect(LVILatticeVal &BBLV,
1018 SelectInst *SI, BasicBlock *BB) {
1020 // Recurse on our inputs if needed
1021 if (!hasBlockValue(SI->getTrueValue(), BB)) {
1022 if (pushBlockValue(std::make_pair(BB, SI->getTrueValue())))
1024 BBLV = LVILatticeVal::getOverdefined();
1027 LVILatticeVal TrueVal = getBlockValue(SI->getTrueValue(), BB);
1028 // If we hit overdefined, don't ask more queries. We want to avoid poisoning
1029 // extra slots in the table if we can.
1030 if (TrueVal.isOverdefined()) {
1031 BBLV = LVILatticeVal::getOverdefined();
1035 if (!hasBlockValue(SI->getFalseValue(), BB)) {
1036 if (pushBlockValue(std::make_pair(BB, SI->getFalseValue())))
1038 BBLV = LVILatticeVal::getOverdefined();
1041 LVILatticeVal FalseVal = getBlockValue(SI->getFalseValue(), BB);
1042 // If we hit overdefined, don't ask more queries. We want to avoid poisoning
1043 // extra slots in the table if we can.
1044 if (FalseVal.isOverdefined()) {
1045 BBLV = LVILatticeVal::getOverdefined();
// When both arms have constant ranges, recognize min/max select patterns and
// compute the result range directly from the two operand ranges.
1049 if (TrueVal.isConstantRange() && FalseVal.isConstantRange()) {
1050 const ConstantRange &TrueCR = TrueVal.getConstantRange();
1051 const ConstantRange &FalseCR = FalseVal.getConstantRange();
1052 Value *LHS = nullptr;
1053 Value *RHS = nullptr;
1054 SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
1055 // Is this a min specifically of our two inputs? (Avoid the risk of
1056 // ValueTracking getting smarter looking back past our immediate inputs.)
1057 if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
1058 LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) {
1059 ConstantRange ResultCR = [&]() {
1060 switch (SPR.Flavor) {
1062 llvm_unreachable("unexpected minmax type!");
1063 case SPF_SMIN: /// Signed minimum
1064 return TrueCR.smin(FalseCR);
1065 case SPF_UMIN: /// Unsigned minimum
1066 return TrueCR.umin(FalseCR);
1067 case SPF_SMAX: /// Signed maximum
1068 return TrueCR.smax(FalseCR);
1069 case SPF_UMAX: /// Unsigned maximum
1070 return TrueCR.umax(FalseCR);
1073 BBLV = LVILatticeVal::getRange(ResultCR);
1077 // TODO: ABS, NABS from the SelectPatternResult
1080 // Can we constrain the facts about the true and false values by using the
1081 // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
1082 // TODO: We could potentially refine an overdefined true value above.
1083 Value *Cond = SI->getCondition();
1084 TrueVal = intersect(TrueVal,
1085 getValueFromCondition(SI->getTrueValue(), Cond, true));
1086 FalseVal = intersect(FalseVal,
1087 getValueFromCondition(SI->getFalseValue(), Cond, false));
1089 // Handle clamp idioms such as:
1090 // %24 = constantrange<0, 17>
1091 // %39 = icmp eq i32 %24, 0
1092 // %40 = add i32 %24, -1
1093 // %siv.next = select i1 %39, i32 16, i32 %40
1094 // %siv.next = constantrange<0, 17> not <-1, 17>
1095 // In general, this can handle any clamp idiom which tests the edge
1096 // condition via an equality or inequality.
1097 if (auto *ICI = dyn_cast<ICmpInst>(Cond)) {
1098 ICmpInst::Predicate Pred = ICI->getPredicate();
1099 Value *A = ICI->getOperand(0);
1100 if (ConstantInt *CIBase = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
// Helper: sum two ConstantInts of the same type.
1101 auto addConstants = [](ConstantInt *A, ConstantInt *B) {
1102 assert(A->getType() == B->getType());
1103 return ConstantInt::get(A->getType(), A->getValue() + B->getValue());
1105 // See if either input is A + C2, subject to the constraint from the
1106 // condition that A != C when that input is used. We can assume that
1107 // that input doesn't include C + C2.
1108 ConstantInt *CIAdded;
1111 case ICmpInst::ICMP_EQ:
// On the false arm, A != CIBase holds, so A + CIAdded != CIBase + CIAdded.
1112 if (match(SI->getFalseValue(), m_Add(m_Specific(A),
1113 m_ConstantInt(CIAdded)))) {
1114 auto ResNot = addConstants(CIBase, CIAdded);
1115 FalseVal = intersect(FalseVal,
1116 LVILatticeVal::getNot(ResNot));
1119 case ICmpInst::ICMP_NE:
// Symmetric: on the true arm of a NE test, A != CIBase holds.
1120 if (match(SI->getTrueValue(), m_Add(m_Specific(A),
1121 m_ConstantInt(CIAdded)))) {
1122 auto ResNot = addConstants(CIBase, CIAdded);
1123 TrueVal = intersect(TrueVal,
1124 LVILatticeVal::getNot(ResNot));
// Finally merge the (refined) true and false arms into the result.
1131 LVILatticeVal Result; // Start Undefined.
1132 Result.mergeIn(TrueVal, DL);
1133 Result.mergeIn(FalseVal, DL);
// Compute the lattice value of cast instruction CI at the end of BB into
// BBLV by applying ConstantRange::castOp to the (possibly conservative)
// range of the cast's operand.
1138 bool LazyValueInfoImpl::solveBlockValueCast(LVILatticeVal &BBLV,
1141 if (!CI->getOperand(0)->getType()->isSized()) {
1142 // Without knowing how wide the input is, we can't analyze it in any useful
1144 BBLV = LVILatticeVal::getOverdefined();
1148 // Filter out casts we don't know how to reason about before attempting to
1149 // recurse on our operand. This can cut a long search short if we know we're
1150 // not going to be able to get any useful information anyways.
1151 switch (CI->getOpcode()) {
1152 case Instruction::Trunc:
1153 case Instruction::SExt:
1154 case Instruction::ZExt:
1155 case Instruction::BitCast:
1158 // Unhandled instructions are overdefined.
1159 DEBUG(dbgs() << " compute BB '" << BB->getName()
1160 << "' - overdefined (unknown cast).\n");
1161 BBLV = LVILatticeVal::getOverdefined();
1165 // Figure out the range of the LHS. If that fails, we still apply the
1166 // transfer rule on the full set since we may be able to locally infer
1167 // interesting facts.
1168 if (!hasBlockValue(CI->getOperand(0), BB))
1169 if (pushBlockValue(std::make_pair(BB, CI->getOperand(0))))
1170 // More work to do before applying this transfer rule.
// Default to the full set for the operand's bit width, then narrow it if a
// block value (refined by assumes/guards) is available.
1173 const unsigned OperandBitWidth =
1174 DL.getTypeSizeInBits(CI->getOperand(0)->getType());
1175 ConstantRange LHSRange = ConstantRange(OperandBitWidth);
1176 if (hasBlockValue(CI->getOperand(0), BB)) {
1177 LVILatticeVal LHSVal = getBlockValue(CI->getOperand(0), BB);
1178 intersectAssumeOrGuardBlockValueConstantRange(CI->getOperand(0), LHSVal,
1180 if (LHSVal.isConstantRange())
1181 LHSRange = LHSVal.getConstantRange();
1184 const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
1186 // NOTE: We're currently limited by the set of operations that ConstantRange
1187 // can evaluate symbolically. Enhancing that set will allow us to analyze
1188 // more definitions.
1189 BBLV = LVILatticeVal::getRange(LHSRange.castOp(CI->getOpcode(),
// Compute the lattice value of binary operator BO at the end of BB into
// BBLV. Only handles the listed opcodes with a ConstantInt second operand;
// applies ConstantRange::binaryOp to the LHS range and the singleton RHS.
1194 bool LazyValueInfoImpl::solveBlockValueBinaryOp(LVILatticeVal &BBLV,
1198 assert(BO->getOperand(0)->getType()->isSized() &&
1199 "all operands to binary operators are sized");
1201 // Filter out operators we don't know how to reason about before attempting to
1202 // recurse on our operand(s). This can cut a long search short if we know
1203 // we're not going to be able to get any useful information anyways.
1204 switch (BO->getOpcode()) {
1205 case Instruction::Add:
1206 case Instruction::Sub:
1207 case Instruction::Mul:
1208 case Instruction::UDiv:
1209 case Instruction::Shl:
1210 case Instruction::LShr:
1211 case Instruction::And:
1212 case Instruction::Or:
1213 // continue into the code below
1216 // Unhandled instructions are overdefined.
1217 DEBUG(dbgs() << " compute BB '" << BB->getName()
1218 << "' - overdefined (unknown binary operator).\n");
1219 BBLV = LVILatticeVal::getOverdefined();
1223 // Figure out the range of the LHS. If that fails, use a conservative range,
1224 // but apply the transfer rule anyways. This lets us pick up facts from
1225 // expressions like "and i32 (call i32 @foo()), 32"
1226 if (!hasBlockValue(BO->getOperand(0), BB))
1227 if (pushBlockValue(std::make_pair(BB, BO->getOperand(0))))
1228 // More work to do before applying this transfer rule.
// Start from the full set for the operand width, then narrow with the block
// value (refined by assumes/guards) when one is available.
1231 const unsigned OperandBitWidth =
1232 DL.getTypeSizeInBits(BO->getOperand(0)->getType());
1233 ConstantRange LHSRange = ConstantRange(OperandBitWidth);
1234 if (hasBlockValue(BO->getOperand(0), BB)) {
1235 LVILatticeVal LHSVal = getBlockValue(BO->getOperand(0), BB);
1236 intersectAssumeOrGuardBlockValueConstantRange(BO->getOperand(0), LHSVal,
1238 if (LHSVal.isConstantRange())
1239 LHSRange = LHSVal.getConstantRange();
// The RHS is known to be a ConstantInt here (elided filtering above rejects
// other shapes); model it as a single-element range.
1242 ConstantInt *RHS = cast<ConstantInt>(BO->getOperand(1));
1243 ConstantRange RHSRange = ConstantRange(RHS->getValue());
1245 // NOTE: We're currently limited by the set of operations that ConstantRange
1246 // can evaluate symbolically. Enhancing that set will allow us to analyze
1247 // more definitions.
1248 Instruction::BinaryOps BinOp = BO->getOpcode();
1249 BBLV = LVILatticeVal::getRange(LHSRange.binaryOp(BinOp, RHSRange));
// Derive a lattice value for Val from the icmp condition ICI being true
// (isTrueDest) or false. Handles direct equality with a constant, and range
// constraints via ConstantRange::makeAllowedICmpRegion, including the
// InstCombine "add Val, Offset" range-check idiom.
1253 static LVILatticeVal getValueFromICmpCondition(Value *Val, ICmpInst *ICI,
1255 Value *LHS = ICI->getOperand(0);
1256 Value *RHS = ICI->getOperand(1);
1257 CmpInst::Predicate Predicate = ICI->getPredicate();
1259 if (isa<Constant>(RHS)) {
1260 if (ICI->isEquality() && LHS == Val) {
1261 // We know that V has the RHS constant if this is a true SETEQ or
1263 if (isTrueDest == (Predicate == ICmpInst::ICMP_EQ))
1264 return LVILatticeVal::get(cast<Constant>(RHS));
1266 return LVILatticeVal::getNot(cast<Constant>(RHS));
1270 if (!Val->getType()->isIntegerTy())
1271 return LVILatticeVal::getOverdefined();
1273 // Use ConstantRange::makeAllowedICmpRegion in order to determine the possible
1274 // range of Val guaranteed by the condition. Recognize comparisons in the form
1276 // icmp <pred> Val, ...
1277 // icmp <pred> (add Val, Offset), ...
1278 // The latter is the range checking idiom that InstCombine produces. Subtract
1279 // the offset from the allowed range for RHS in this case.
1281 // Val or (add Val, Offset) can be on either hand of the comparison
1282 if (LHS != Val && !match(LHS, m_Add(m_Specific(Val), m_ConstantInt()))) {
1283 std::swap(LHS, RHS);
1284 Predicate = CmpInst::getSwappedPredicate(Predicate);
1287 ConstantInt *Offset = nullptr;
1289 match(LHS, m_Add(m_Specific(Val), m_ConstantInt(Offset)));
1291 if (LHS == Val || Offset) {
1292 // Calculate the range of values that are allowed by the comparison
1293 ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
1294 /*isFullSet=*/true);
1295 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
1296 RHSRange = ConstantRange(CI->getValue());
1297 else if (Instruction *I = dyn_cast<Instruction>(RHS))
// A non-constant RHS may still carry !range metadata we can use.
1298 if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
1299 RHSRange = getConstantRangeFromMetadata(*Ranges);
1301 // If we're interested in the false dest, invert the condition
1302 CmpInst::Predicate Pred =
1303 isTrueDest ? Predicate : CmpInst::getInversePredicate(Predicate);
1304 ConstantRange TrueValues =
1305 ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
1307 if (Offset) // Apply the offset from above.
1308 TrueValues = TrueValues.subtract(Offset->getValue());
1310 return LVILatticeVal::getRange(std::move(TrueValues));
1313 return LVILatticeVal::getOverdefined();
// Forward declaration of the memoized recursion: Visited caches the lattice
// value per sub-condition so compound conditions aren't re-evaluated.
1316 static LVILatticeVal
1317 getValueFromCondition(Value *Val, Value *Cond, bool isTrueDest,
1318 DenseMap<Value*, LVILatticeVal> &Visited);
// Uncached worker for getValueFromCondition: handles a bare icmp directly,
// and recurses through 'and' (on the true path) / 'or' (on the false path),
// intersecting the facts from both operands.
1320 static LVILatticeVal
1321 getValueFromConditionImpl(Value *Val, Value *Cond, bool isTrueDest,
1322 DenseMap<Value*, LVILatticeVal> &Visited) {
1323 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
1324 return getValueFromICmpCondition(Val, ICI, isTrueDest);
1326 // Handle conditions in the form of (cond1 && cond2), we know that on the
1327 // true dest path both of the conditions hold. Similarly for conditions of
1328 // the form (cond1 || cond2), we know that on the false dest path neither
1330 BinaryOperator *BO = dyn_cast<BinaryOperator>(Cond);
1331 if (!BO || (isTrueDest && BO->getOpcode() != BinaryOperator::And) ||
1332 (!isTrueDest && BO->getOpcode() != BinaryOperator::Or))
1333 return LVILatticeVal::getOverdefined();
// Both sub-conditions constrain Val simultaneously; take the intersection.
1335 auto RHS = getValueFromCondition(Val, BO->getOperand(0), isTrueDest, Visited);
1336 auto LHS = getValueFromCondition(Val, BO->getOperand(1), isTrueDest, Visited);
1337 return intersect(RHS, LHS);
// Memoizing wrapper: consult/populate the Visited cache around
// getValueFromConditionImpl so shared sub-conditions are solved once.
1340 static LVILatticeVal
1341 getValueFromCondition(Value *Val, Value *Cond, bool isTrueDest,
1342 DenseMap<Value*, LVILatticeVal> &Visited) {
1343 auto I = Visited.find(Cond);
1344 if (I != Visited.end())
1347 auto Result = getValueFromConditionImpl(Val, Cond, isTrueDest, Visited);
1348 Visited[Cond] = Result;
// Public entry point: allocate a fresh memoization map per query and
// delegate to the cached recursion above.
1352 LVILatticeVal getValueFromCondition(Value *Val, Value *Cond, bool isTrueDest) {
1353 assert(Cond && "precondition");
1354 DenseMap<Value*, LVILatticeVal> Visited;
1355 return getValueFromCondition(Val, Cond, isTrueDest, Visited);
1358 /// \brief Compute the value of Val on the edge BBFrom -> BBTo. Returns false if
1359 /// Val is not constrained on the edge. Result is unspecified if return value
// Two sources of edge information: a conditional branch whose condition
// constrains Val, or a switch on Val itself.
1361 static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
1362 BasicBlock *BBTo, LVILatticeVal &Result) {
1363 // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
1364 // know that v != 0.
1365 if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
1366 // If this is a conditional branch and only one successor goes to BBTo, then
1367 // we may be able to infer something from the condition.
1368 if (BI->isConditional() &&
1369 BI->getSuccessor(0) != BI->getSuccessor(1)) {
1370 bool isTrueDest = BI->getSuccessor(0) == BBTo;
1371 assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1372 "BBTo isn't a successor of BBFrom");
1374 // If V is the condition of the branch itself, then we know exactly what
1376 if (BI->getCondition() == Val) {
// The condition is i1; on this edge it equals isTrueDest.
1377 Result = LVILatticeVal::get(ConstantInt::get(
1378 Type::getInt1Ty(Val->getContext()), isTrueDest));
1382 // If the condition of the branch is an equality comparison, we may be
1383 // able to infer the value.
1384 Result = getValueFromCondition(Val, BI->getCondition(), isTrueDest);
1385 if (!Result.isOverdefined())
1390 // If the edge was formed by a switch on the value, then we may know exactly
1392 if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
1393 if (SI->getCondition() != Val)
1396 bool DefaultCase = SI->getDefaultDest() == BBTo;
1397 unsigned BitWidth = Val->getType()->getIntegerBitWidth();
// Default edge: start with the full set and subtract covered case values.
// Case edge: start empty and union in the values that lead to BBTo.
1398 ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
1400 for (auto Case : SI->cases()) {
1401 ConstantRange EdgeVal(Case.getCaseValue()->getValue());
1403 // It is possible that the default destination is the destination of
1404 // some cases. There is no need to perform difference for those cases.
1405 if (Case.getCaseSuccessor() != BBTo)
1406 EdgesVals = EdgesVals.difference(EdgeVal);
1407 } else if (Case.getCaseSuccessor() == BBTo)
1408 EdgesVals = EdgesVals.unionWith(EdgeVal);
1410 Result = LVILatticeVal::getRange(std::move(EdgesVals));
1416 /// \brief Compute the value of Val on the edge BBFrom -> BBTo or the value at
1417 /// the basic block if the edge does not constrain Val.
// Returns false (via elided paths) when more solving work was pushed; Result
// is the intersection of the edge-local fact and the in-block value.
1418 bool LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
1419 BasicBlock *BBTo, LVILatticeVal &Result,
1420 Instruction *CxtI) {
1421 // If already a constant, there is nothing to compute.
1422 if (Constant *VC = dyn_cast<Constant>(Val)) {
1423 Result = LVILatticeVal::get(VC);
1427 LVILatticeVal LocalResult;
1428 if (!getEdgeValueLocal(Val, BBFrom, BBTo, LocalResult))
1429 // If we couldn't constrain the value on the edge, LocalResult doesn't
1430 // provide any information.
1431 LocalResult = LVILatticeVal::getOverdefined();
1433 if (hasSingleValue(LocalResult)) {
1434 // Can't get any more precise here
1435 Result = LocalResult;
1439 if (!hasBlockValue(Val, BBFrom)) {
1440 if (pushBlockValue(std::make_pair(BBFrom, Val)))
1442 // No new information.
1443 Result = LocalResult;
1447 // Try to intersect ranges of the BB and the constraint on the edge.
1448 LVILatticeVal InBlock = getBlockValue(Val, BBFrom);
// The source block's terminator is always a valid context for assumes.
1449 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock,
1450 BBFrom->getTerminator());
1451 // We can use the context instruction (generically the ultimate instruction
1452 // the calling pass is trying to simplify) here, even though the result of
1453 // this function is generally cached when called from the solve* functions
1454 // (and that cached result might be used with queries using a different
1455 // context instruction), because when this function is called from the solve*
1456 // functions, the context instruction is not provided. When called from
1457 // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
1458 // but then the result is not cached.
1459 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI)
1461 Result = intersect(LocalResult, InBlock);
// Query entry point: solve (if necessary) and return the lattice value of V
// at the end of BB, refined by assume/guard facts at CxtI.
1465 LVILatticeVal LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
1466 Instruction *CxtI) {
1467 DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1468 << BB->getName() << "'\n");
// The worklist must be empty between top-level queries.
1470 assert(BlockValueStack.empty() && BlockValueSet.empty());
1471 if (!hasBlockValue(V, BB)) {
1472 pushBlockValue(std::make_pair(BB, V));
1475 LVILatticeVal Result = getBlockValue(V, BB);
1476 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1478 DEBUG(dbgs() << " Result = " << Result << "\n");
// Query entry point: the lattice value of V at instruction CxtI, using only
// range metadata plus assume/guard facts (no block-level dataflow solve).
1482 LVILatticeVal LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
1483 DEBUG(dbgs() << "LVI Getting value " << *V << " at '"
1484 << CxtI->getName() << "'\n");
1486 if (auto *C = dyn_cast<Constant>(V))
1487 return LVILatticeVal::get(C);
1489 LVILatticeVal Result = LVILatticeVal::getOverdefined();
1490 if (auto *I = dyn_cast<Instruction>(V))
1491 Result = getFromRangeMetadata(I);
1492 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1494 DEBUG(dbgs() << " Result = " << Result << "\n");
// Query entry point: the lattice value of V on the FromBB -> ToBB edge.
// Retries once after solving; the second call must be a fast (cached) query.
1498 LVILatticeVal LazyValueInfoImpl::
1499 getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1500 Instruction *CxtI) {
1501 DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1502 << FromBB->getName() << "' to '" << ToBB->getName() << "'\n");
1504 LVILatticeVal Result;
1505 if (!getEdgeValue(V, FromBB, ToBB, Result, CxtI)) {
1507 bool WasFastQuery = getEdgeValue(V, FromBB, ToBB, Result, CxtI);
1509 assert(WasFastQuery && "More work to do after problem solved?");
1512 DEBUG(dbgs() << " Result = " << Result << "\n");
// Notify the cache that the PredBB -> OldSucc edge has been threaded to
// NewSucc; the cache invalidates/updates affected entries.
1516 void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1517 BasicBlock *NewSucc) {
1518 TheCache.threadEdgeImpl(OldSucc, NewSucc);
1521 //===----------------------------------------------------------------------===//
1522 // LazyValueInfo Impl
1523 //===----------------------------------------------------------------------===//
1525 /// This lazily constructs the LazyValueInfoImpl.
// PImpl is an opaque void* slot owned by the caller; on first use it is
// populated with a heap-allocated LazyValueInfoImpl (freed in releaseMemory).
1526 static LazyValueInfoImpl &getImpl(void *&PImpl, AssumptionCache *AC,
1527 const DataLayout *DL,
1528 DominatorTree *DT = nullptr) {
1530 assert(DL && "getCache() called with a null DataLayout");
1531 PImpl = new LazyValueInfoImpl(AC, *DL, DT);
1533 return *static_cast<LazyValueInfoImpl*>(PImpl);
// Wire up the analysis dependencies (assumption cache, optional dominator
// tree, TLI) for this function and reset any stale cached LVI state.
1536 bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
1537 Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1538 const DataLayout &DL = F.getParent()->getDataLayout();
// DT is optional — LVI degrades gracefully without it.
1540 DominatorTreeWrapperPass *DTWP =
1541 getAnalysisIfAvailable<DominatorTreeWrapperPass>();
1542 Info.DT = DTWP ? &DTWP->getDomTree() : nullptr;
1543 Info.TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1546 getImpl(Info.PImpl, Info.AC, &DL, Info.DT).clear();
// LVI is a pure analysis: preserves everything, requires AC and TLI.
1552 void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1553 AU.setPreservesAll();
1554 AU.addRequired<AssumptionCacheTracker>();
1555 AU.addRequired<TargetLibraryInfoWrapperPass>();
// Accessor for the wrapped legacy-PM LazyValueInfo result.
1558 LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
// Destructor frees the lazily-allocated impl via releaseMemory().
1560 LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
// Free the lazily-constructed LazyValueInfoImpl, if any was created.
1562 void LazyValueInfo::releaseMemory() {
1563 // If the cache was allocated, free it.
1565 delete &getImpl(PImpl, AC, nullptr);
// New-PM invalidation hook: stay valid only if this result was explicitly
// preserved and the dominator tree (if we captured one) is still valid.
1570 bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1571 FunctionAnalysisManager::Invalidator &Inv) {
1572 // We need to invalidate if we have either failed to preserve this analyses
1573 // result directly or if any of its dependencies have been invalidated.
1574 auto PAC = PA.getChecker<LazyValueAnalysis>();
1575 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
1576 (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA)))
// Legacy-PM hook: forward to the wrapped result's releaseMemory.
1582 void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
// New-PM analysis entry point: gather dependencies and construct the
// (lazy) result object. The dominator tree is only used if already cached.
1584 LazyValueInfo LazyValueAnalysis::run(Function &F, FunctionAnalysisManager &FAM) {
1585 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1586 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
1587 auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
1589 return LazyValueInfo(&AC, &F.getParent()->getDataLayout(), &TLI, DT);
1592 /// Returns true if we can statically tell that this value will never be a
1593 /// "useful" constant. In practice, this means we've got something like an
1594 /// alloca or a malloc call for which a comparison against a constant can
1595 /// only be guarding dead code. Note that we are potentially giving up some
1596 /// precision in dead code (a constant result) in favour of avoiding an
1597 /// expensive search for an easily answered common query.
1598 static bool isKnownNonConstant(Value *V) {
// Look through bitcasts/GEP-style pointer casts before classifying.
1599 V = V->stripPointerCasts();
1600 // The return val of alloc cannot be a Constant.
1601 if (isa<AllocaInst>(V))
// Public API: return the constant V is known to equal at the end of BB
// (considering CxtI), or null. A singleton constant range also counts.
1606 Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
1607 Instruction *CxtI) {
1608 // Bail out early if V is known not to be a Constant.
1609 if (isKnownNonConstant(V))
1612 const DataLayout &DL = BB->getModule()->getDataLayout();
1613 LVILatticeVal Result =
1614 getImpl(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);
1616 if (Result.isConstant())
1617 return Result.getConstant();
1618 if (Result.isConstantRange()) {
1619 const ConstantRange &CR = Result.getConstantRange();
// A range that collapses to one element is a constant too.
1620 if (const APInt *SingleVal = CR.getSingleElement())
1621 return ConstantInt::get(V->getContext(), *SingleVal);
// Public API: the range of possible values of integer V at the end of BB.
// Undefined lattice state maps to the empty set; anything unhelpful maps
// to the full set.
1626 ConstantRange LazyValueInfo::getConstantRange(Value *V, BasicBlock *BB,
1627 Instruction *CxtI) {
1628 assert(V->getType()->isIntegerTy());
1629 unsigned Width = V->getType()->getIntegerBitWidth();
1630 const DataLayout &DL = BB->getModule()->getDataLayout();
1631 LVILatticeVal Result =
1632 getImpl(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);
1633 if (Result.isUndefined())
1634 return ConstantRange(Width, /*isFullSet=*/false);
1635 if (Result.isConstantRange())
1636 return Result.getConstantRange();
1637 // We represent ConstantInt constants as constant ranges but other kinds
1638 // of integer constants, i.e. ConstantExpr will be tagged as constants
1639 assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
1640 "ConstantInt value must be represented as constantrange");
1641 return ConstantRange(Width, /*isFullSet=*/true);
1644 /// Determine whether the specified value is known to be a
1645 /// constant on the specified edge. Return null if not.
1646 Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1648 Instruction *CxtI) {
1649 const DataLayout &DL = FromBB->getModule()->getDataLayout();
1650 LVILatticeVal Result =
1651 getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
1653 if (Result.isConstant())
1654 return Result.getConstant();
1655 if (Result.isConstantRange()) {
1656 const ConstantRange &CR = Result.getConstantRange();
// A single-element range on the edge is as good as a constant.
1657 if (const APInt *SingleVal = CR.getSingleElement())
1658 return ConstantInt::get(V->getContext(), *SingleVal);
// Public API: range of V restricted to the FromBB -> ToBB edge. Mirrors
// getConstantRange: undefined -> empty set, unknown -> full set.
1663 ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
1666 Instruction *CxtI) {
1667 unsigned Width = V->getType()->getIntegerBitWidth();
1668 const DataLayout &DL = FromBB->getModule()->getDataLayout();
1669 LVILatticeVal Result =
1670 getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
1672 if (Result.isUndefined())
1673 return ConstantRange(Width, /*isFullSet=*/false);
1674 if (Result.isConstantRange())
1675 return Result.getConstantRange();
1676 // We represent ConstantInt constants as constant ranges but other kinds
1677 // of integer constants, i.e. ConstantExpr will be tagged as constants
1678 assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
1679 "ConstantInt value must be represented as constantrange");
1680 return ConstantRange(Width, /*isFullSet=*/true);
// Evaluate `icmp Pred Val, C` given Val's lattice value. Returns True/False
// when the lattice state decides the comparison, Unknown otherwise.
1683 static LazyValueInfo::Tristate getPredicateResult(unsigned Pred, Constant *C,
1684 const LVILatticeVal &Val,
1685 const DataLayout &DL,
1686 TargetLibraryInfo *TLI) {
1688 // If we know the value is a constant, evaluate the conditional.
1689 Constant *Res = nullptr;
1690 if (Val.isConstant()) {
1691 Res = ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL, TLI);
1692 if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
1693 return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
1694 return LazyValueInfo::Unknown;
1697 if (Val.isConstantRange()) {
// Range-based reasoning only works against a ConstantInt RHS.
1698 ConstantInt *CI = dyn_cast<ConstantInt>(C);
1699 if (!CI) return LazyValueInfo::Unknown;
1701 const ConstantRange &CR = Val.getConstantRange();
1702 if (Pred == ICmpInst::ICMP_EQ) {
1703 if (!CR.contains(CI->getValue()))
1704 return LazyValueInfo::False;
1706 if (CR.isSingleElement())
1707 return LazyValueInfo::True;
1708 } else if (Pred == ICmpInst::ICMP_NE) {
1709 if (!CR.contains(CI->getValue()))
1710 return LazyValueInfo::True;
1712 if (CR.isSingleElement())
1713 return LazyValueInfo::False;
1715 // Handle more complex predicates.
1716 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
1717 (ICmpInst::Predicate)Pred, CI->getValue());
1718 if (TrueValues.contains(CR))
1719 return LazyValueInfo::True;
1720 if (TrueValues.inverse().contains(CR))
1721 return LazyValueInfo::False;
1723 return LazyValueInfo::Unknown;
1726 if (Val.isNotConstant()) {
1727 // If this is an equality comparison, we can try to fold it knowing that
1729 if (Pred == ICmpInst::ICMP_EQ) {
1730 // !C1 == C -> false iff C1 == C.
1731 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1732 Val.getNotConstant(), C, DL,
1734 if (Res->isNullValue())
1735 return LazyValueInfo::False;
1736 } else if (Pred == ICmpInst::ICMP_NE) {
1737 // !C1 != C -> true iff C1 == C.
1738 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1739 Val.getNotConstant(), C, DL,
1741 if (Res->isNullValue())
1742 return LazyValueInfo::True;
1744 return LazyValueInfo::Unknown;
1747 return LazyValueInfo::Unknown;
1750 /// Determine whether the specified value comparison with a constant is known to
1751 /// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
1752 LazyValueInfo::Tristate
1753 LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
1754 BasicBlock *FromBB, BasicBlock *ToBB,
1755 Instruction *CxtI) {
1756 const DataLayout &DL = FromBB->getModule()->getDataLayout();
// Solve V's lattice value on the edge, then let getPredicateResult decide.
1757 LVILatticeVal Result =
1758 getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
1760 return getPredicateResult(Pred, C, Result, DL, TLI);
// Evaluate `icmp Pred V, C` at the location of CxtI. Tries, in order: a
// non-null fastpath, the lattice value at CxtI, and finally per-predecessor
// edge queries (one step back) to prove the predicate on every incoming edge.
1763 LazyValueInfo::Tristate
1764 LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
1765 Instruction *CxtI) {
1766 // Is or is not NonNull are common predicates being queried. If
1767 // isKnownNonNull can tell us the result of the predicate, we can
1768 // return it quickly. But this is only a fastpath, and falling
1769 // through would still be correct.
1770 if (V->getType()->isPointerTy() && C->isNullValue() &&
1771 isKnownNonNull(V->stripPointerCasts())) {
1772 if (Pred == ICmpInst::ICMP_EQ)
1773 return LazyValueInfo::False;
1774 else if (Pred == ICmpInst::ICMP_NE)
1775 return LazyValueInfo::True;
1777 const DataLayout &DL = CxtI->getModule()->getDataLayout();
1778 LVILatticeVal Result = getImpl(PImpl, AC, &DL, DT).getValueAt(V, CxtI);
1779 Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
1783 // Note: The following bit of code is somewhat distinct from the rest of LVI;
1784 // LVI as a whole tries to compute a lattice value which is conservatively
1785 // correct at a given location. In this case, we have a predicate which we
1786 // weren't able to prove about the merged result, and we're pushing that
1787 // predicate back along each incoming edge to see if we can prove it
1788 // separately for each input. As a motivating example, consider:
1790 // %v1 = ... ; constantrange<1, 5>
1793 // %v2 = ... ; constantrange<10, 20>
1796 // %phi = phi [%v1, %v2] ; constantrange<1,20>
1797 // %pred = icmp eq i32 %phi, 8
1798 // We can't tell from the lattice value for '%phi' that '%pred' is false
1799 // along each path, but by checking the predicate over each input separately,
1801 // We limit the search to one step backwards from the current BB and value.
1802 // We could consider extending this to search further backwards through the
1803 // CFG and/or value graph, but there are non-obvious compile time vs quality
1806 BasicBlock *BB = CxtI->getParent();
1808 // Function entry or an unreachable block. Bail to avoid confusing
1810 pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
1814 // If V is a PHI node in the same block as the context, we need to ask
1815 // questions about the predicate as applied to the incoming value along
1816 // each edge. This is useful for eliminating cases where the predicate is
1817 // known along all incoming edges.
1818 if (auto *PHI = dyn_cast<PHINode>(V))
1819 if (PHI->getParent() == BB) {
1820 Tristate Baseline = Unknown;
1821 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
1822 Value *Incoming = PHI->getIncomingValue(i);
1823 BasicBlock *PredBB = PHI->getIncomingBlock(i);
1824 // Note that PredBB may be BB itself.
1825 Tristate Result = getPredicateOnEdge(Pred, Incoming, C, PredBB, BB,
1828 // Keep going as long as we've seen a consistent known result for
1830 Baseline = (i == 0) ? Result /* First iteration */
1831 : (Baseline == Result ? Baseline : Unknown); /* All others */
1832 if (Baseline == Unknown)
1835 if (Baseline != Unknown)
1839 // For a comparison where the V is outside this block, it's possible
1840 // that we've branched on it before. Look to see if the value is known
1841 // on all incoming edges.
1842 if (!isa<Instruction>(V) ||
1843 cast<Instruction>(V)->getParent() != BB) {
1844 // For predecessor edge, determine if the comparison is true or false
1845 // on that edge. If they're all true or all false, we can conclude
1846 // the value of the comparison in this block.
1847 Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1848 if (Baseline != Unknown) {
1849 // Check that all remaining incoming values match the first one.
1850 while (++PI != PE) {
1851 Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1852 if (Ret != Baseline) break;
1854 // If we terminated early, then one of the values didn't match.
// Public API: propagate a jump-threading CFG update into the LVI cache.
1864 void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1865 BasicBlock *NewSucc) {
1867 const DataLayout &DL = PredBB->getModule()->getDataLayout();
1868 getImpl(PImpl, AC, &DL, DT).threadEdge(PredBB, OldSucc, NewSucc);
// Public API: drop cached LVI state for a block being deleted from the CFG.
1872 void LazyValueInfo::eraseBlock(BasicBlock *BB) {
1874 const DataLayout &DL = BB->getModule()->getDataLayout();
1875 getImpl(PImpl, AC, &DL, DT).eraseBlock(BB);
// Debug helper: dump the LVI results for F (annotated per block) to OS.
1880 void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
1882 getImpl(PImpl, AC, DL, DT).printLVI(F, DTree, OS);
1886 // Print the LVI for the function arguments at the start of each basic block.
1887 void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
1888 const BasicBlock *BB, formatted_raw_ostream &OS) {
1889 // Find if there are latticevalues defined for arguments of the function.
1890 auto *F = BB->getParent();
1891 for (auto &Arg : F->args()) {
// const_casts are needed because getValueInBlock takes mutable pointers
// but the AssemblyAnnotationWriter interface hands us const ones.
1892 LVILatticeVal Result = LVIImpl->getValueInBlock(
1893 const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
// Skip arguments with no interesting (undefined) lattice value.
1894 if (Result.isUndefined())
1896 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
1900 // This function prints the LVI analysis for the instruction I at the beginning
1901 // of various basic blocks. It relies on calculated values that are stored in
1902 // the LazyValueInfoCache, and in the absence of cached values, recalculates the
1903 // LazyValueInfo for `I`, and print that info.
1904 void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
1905 const Instruction *I, formatted_raw_ostream &OS) {
1907 auto *ParentBB = I->getParent();
// Tracks blocks already printed so each appears at most once.
1908 SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
1909 // We can generate (solve) LVI values only for blocks that are dominated by
1910 // the I's parent. However, to avoid generating LVI for all dominating blocks,
1911 // that contain redundant/uninteresting information, we print LVI for
1912 // blocks that may use this LVI information (such as immediate successor
1913 // blocks, and blocks that contain uses of `I`).
1914 auto printResult = [&](const BasicBlock *BB) {
1915 if (!BlocksContainingLVI.insert(BB).second)
1917 LVILatticeVal Result = LVIImpl->getValueInBlock(
1918 const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
1919 OS << "; LatticeVal for: '" << *I << "' in BB: '";
1920 BB->printAsOperand(OS, false);
1921 OS << "' is: " << Result << "\n";
1924 printResult(ParentBB);
1925 // Print the LVI analysis results for the immediate successor blocks, that
1926 // are dominated by `ParentBB`.
1927 for (auto *BBSucc : successors(ParentBB))
1928 if (DT.dominates(ParentBB, BBSucc))
1929 printResult(BBSucc);
1931 // Print LVI in blocks where `I` is used.
1932 for (auto *U : I->users())
1933 if (auto *UseI = dyn_cast<Instruction>(U))
// PHI uses are logically on the incoming edge, not in the PHI's block,
// so only require dominance for non-PHI users.
1934 if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
1935 printResult(UseI->getParent());
1940 // Printer class for LazyValueInfo results.
1941 class LazyValueInfoPrinter : public FunctionPass {
1943 static char ID; // Pass identification, replacement for typeid
1944 LazyValueInfoPrinter() : FunctionPass(ID) {
1945 initializeLazyValueInfoPrinterPass(*PassRegistry::getPassRegistry());
1948 void getAnalysisUsage(AnalysisUsage &AU) const override {
1949 AU.setPreservesAll();
1950 AU.addRequired<LazyValueInfoWrapperPass>();
1951 AU.addRequired<DominatorTreeWrapperPass>();
1954 // Get the mandatory dominator tree analysis and pass this in to the
1955 // LVIPrinter. We cannot rely on the LVI's DT, since it's optional.
1956 bool runOnFunction(Function &F) override {
1957 dbgs() << "LVI for function '" << F.getName() << "':\n";
1958 auto &LVI = getAnalysis<LazyValueInfoWrapperPass>().getLVI();
1959 auto &DTree = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1960 LVI.printLVI(F, DTree, dbgs());
1966 char LazyValueInfoPrinter::ID = 0;
1967 INITIALIZE_PASS_BEGIN(LazyValueInfoPrinter, "print-lazy-value-info",
1968 "Lazy Value Info Printer Pass", false, false)
1969 INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
1970 INITIALIZE_PASS_END(LazyValueInfoPrinter, "print-lazy-value-info",
1971 "Lazy Value Info Printer Pass", false, false)