1 //===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
// This file defines the interface for lazy computation of value constraint
// information.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/Analysis/LazyValueInfo.h"
15 #include "llvm/ADT/DenseSet.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/Analysis/AssumptionCache.h"
19 #include "llvm/Analysis/ConstantFolding.h"
20 #include "llvm/Analysis/InstructionSimplify.h"
21 #include "llvm/Analysis/TargetLibraryInfo.h"
22 #include "llvm/Analysis/ValueLattice.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/IR/AssemblyAnnotationWriter.h"
25 #include "llvm/IR/CFG.h"
26 #include "llvm/IR/ConstantRange.h"
27 #include "llvm/IR/Constants.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/Dominators.h"
30 #include "llvm/IR/Instructions.h"
31 #include "llvm/IR/IntrinsicInst.h"
32 #include "llvm/IR/Intrinsics.h"
33 #include "llvm/IR/LLVMContext.h"
34 #include "llvm/IR/PatternMatch.h"
35 #include "llvm/IR/ValueHandle.h"
36 #include "llvm/InitializePasses.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/FormattedStream.h"
39 #include "llvm/Support/raw_ostream.h"
42 using namespace PatternMatch;
44 #define DEBUG_TYPE "lazy-value-info"
// This is the number of worklist items we will process to try to discover an
// answer for a given value.
static const unsigned MaxProcessedPerValue = 500;

// Pass identification token used by the legacy pass manager.
char LazyValueInfoWrapperPass::ID = 0;
51 LazyValueInfoWrapperPass::LazyValueInfoWrapperPass() : FunctionPass(ID) {
52 initializeLazyValueInfoWrapperPassPass(*PassRegistry::getPassRegistry());
54 INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
55 "Lazy Value Information Analysis", false, true)
56 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
57 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
58 INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
59 "Lazy Value Information Analysis", false, true)
// Factory function for the legacy pass manager wrapper.
FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); }

// Unique identifier for LazyValueAnalysis under the new pass manager.
AnalysisKey LazyValueAnalysis::Key;
67 /// Returns true if this lattice value represents at most one possible value.
68 /// This is as precise as any lattice value can get while still representing
70 static bool hasSingleValue(const ValueLatticeElement &Val) {
71 if (Val.isConstantRange() &&
72 Val.getConstantRange().isSingleElement())
73 // Integer constants are single element ranges
76 // Non integer constants
81 /// Combine two sets of facts about the same value into a single set of
82 /// facts. Note that this method is not suitable for merging facts along
83 /// different paths in a CFG; that's what the mergeIn function is for. This
84 /// is for merging facts gathered about the same value at the same location
85 /// through two independent means.
87 /// * This method does not promise to return the most precise possible lattice
88 /// value implied by A and B. It is allowed to return any lattice element
89 /// which is at least as strong as *either* A or B (unless our facts
90 /// conflict, see below).
91 /// * Due to unreachable code, the intersection of two lattice values could be
92 /// contradictory. If this happens, we return some valid lattice value so as
93 /// not confuse the rest of LVI. Ideally, we'd always return Undefined, but
94 /// we do not make this guarantee. TODO: This would be a useful enhancement.
95 static ValueLatticeElement intersect(const ValueLatticeElement &A,
96 const ValueLatticeElement &B) {
97 // Undefined is the strongest state. It means the value is known to be along
98 // an unreachable path.
104 // If we gave up for one, but got a useable fact from the other, use it.
105 if (A.isOverdefined())
107 if (B.isOverdefined())
110 // Can't get any more precise than constants.
111 if (hasSingleValue(A))
113 if (hasSingleValue(B))
116 // Could be either constant range or not constant here.
117 if (!A.isConstantRange() || !B.isConstantRange()) {
118 // TODO: Arbitrary choice, could be improved
122 // Intersect two constant ranges
123 ConstantRange Range =
124 A.getConstantRange().intersectWith(B.getConstantRange());
125 // Note: An empty range is implicitly converted to unknown or undef depending
126 // on MayIncludeUndef internally.
127 return ValueLatticeElement::getRange(
128 std::move(Range), /*MayIncludeUndef=*/A.isConstantRangeIncludingUndef() |
129 B.isConstantRangeIncludingUndef());
132 //===----------------------------------------------------------------------===//
133 // LazyValueInfoCache Decl
134 //===----------------------------------------------------------------------===//
137 /// A callback value handle updates the cache when values are erased.
138 class LazyValueInfoCache;
139 struct LVIValueHandle final : public CallbackVH {
140 LazyValueInfoCache *Parent;
142 LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
143 : CallbackVH(V), Parent(P) { }
145 void deleted() override;
146 void allUsesReplacedWith(Value *V) override {
150 } // end anonymous namespace
/// This is the cache kept by LazyValueInfo which
/// maintains information about queries across the clients' queries.
// NOTE(review): this excerpt appears elided — several closing braces, `else`
// branches, early returns and access specifiers are missing from view. The
// code below is reproduced as-is; only comments were added.
class LazyValueInfoCache {
  /// This is all of the cached information for one basic block. It contains
  /// the per-value lattice elements, as well as a separate set for
  /// overdefined values to reduce memory usage.
  struct BlockCacheEntry {
    SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
    SmallDenseSet<AssertingVH<Value>, 4> OverDefined;

  /// Cached information per basic block.
  DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>

  /// Set of value handles used to erase values from the cache on deletion.
  DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;

  // Look up BB's cache entry without creating one. Presumably returns null
  // when absent — the `return nullptr;` line is elided here; verify.
  const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
    auto It = BlockCache.find_as(BB);
    if (It == BlockCache.end())
    return It->second.get();

  // Look up BB's cache entry, creating an empty one on first use.
  BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
    auto It = BlockCache.find_as(BB);
    if (It == BlockCache.end())
      It = BlockCache.insert({ BB, std::make_unique<BlockCacheEntry>() })
    return It->second.get();

  // Register a callback handle for Val (at most once) so cached facts are
  // purged when Val is deleted or replaced.
  void addValueHandle(Value *Val) {
    auto HandleIt = ValueHandles.find_as(Val);
    if (HandleIt == ValueHandles.end())
      ValueHandles.insert({ Val, this });

  // Record the solved lattice value for (Val, BB) in the per-block cache.
  void insertResult(Value *Val, BasicBlock *BB,
                    const ValueLatticeElement &Result) {
    BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);

    // Insert over-defined values into their own cache to reduce memory
    // NOTE(review): an `else` between the two inserts appears elided; as
    // written both containers would be populated — confirm against upstream.
    if (Result.isOverdefined())
      Entry->OverDefined.insert(Val);
      Entry->LatticeElements.insert({ Val, Result });

  // Return the cached lattice value for (V, BB), if any was recorded.
  Optional<ValueLatticeElement> getCachedValueInfo(Value *V,
                                                   BasicBlock *BB) const {
    const BlockCacheEntry *Entry = getBlockEntry(BB);

    // Overdefined values live in the separate set, not the map.
    if (Entry->OverDefined.count(V))
      return ValueLatticeElement::getOverdefined();

    auto LatticeIt = Entry->LatticeElements.find_as(V);
    if (LatticeIt == Entry->LatticeElements.end())
    return LatticeIt->second;

  /// clear - Empty the cache.
    ValueHandles.clear();

  /// Inform the cache that a given value has been deleted.
  void eraseValue(Value *V);

  /// This is part of the update interface to inform the cache
  /// that a block has been deleted.
  void eraseBlock(BasicBlock *BB);

  /// Updates the cache to remove any influence an overdefined value in
  /// OldSucc might have (unless also overdefined in NewSucc). This just
  /// flushes elements from the cache and does not add any.
  void threadEdgeImpl(BasicBlock *OldSucc,BasicBlock *NewSucc);
243 void LazyValueInfoCache::eraseValue(Value *V) {
244 for (auto &Pair : BlockCache) {
245 Pair.second->LatticeElements.erase(V);
246 Pair.second->OverDefined.erase(V);
249 auto HandleIt = ValueHandles.find_as(V);
250 if (HandleIt != ValueHandles.end())
251 ValueHandles.erase(HandleIt);
254 void LVIValueHandle::deleted() {
255 // This erasure deallocates *this, so it MUST happen after we're done
256 // using any and all members of *this.
257 Parent->eraseValue(*this);
260 void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
261 BlockCache.erase(BB);
// Flush stale overdefined facts after the edge OldSucc->NewSucc has been
// threaded. Only removes cache entries; never adds any.
// NOTE(review): this excerpt appears elided — a `worklist.pop_back()`,
// `continue` statements, `changed = true;` and closing braces are missing
// from view. Code reproduced as-is; only comments added.
void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
                                        BasicBlock *NewSucc) {
  // When an edge in the graph has been threaded, values that we could not
  // determine a value for before (i.e. were marked overdefined) may be
  // possible to solve now. We do NOT try to proactively update these values.
  // Instead, we clear their entries from the cache, and allow lazy updating to
  // recompute them when needed.

  // The updating process is fairly simple: we need to drop cached info
  // for all values that were marked overdefined in OldSucc, and for those same
  // values in any successor of OldSucc (except NewSucc) in which they were
  // also marked overdefined.
  std::vector<BasicBlock*> worklist;
  worklist.push_back(OldSucc);

  const BlockCacheEntry *Entry = getBlockEntry(OldSucc);
  if (!Entry || Entry->OverDefined.empty())
    return; // Nothing to process here.
  // Snapshot the overdefined values of OldSucc; these are the candidates to
  // clear downstream.
  SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
                                      Entry->OverDefined.end());

  // Use a worklist to perform a depth-first search of OldSucc's successors.
  // NOTE: We do not need a visited list since any blocks we have already
  // visited will have had their overdefined markers cleared already, and we
  // thus won't loop to their successors.
  while (!worklist.empty()) {
    BasicBlock *ToUpdate = worklist.back();

    // Skip blocks only accessible through NewSucc.
    if (ToUpdate == NewSucc) continue;

    // If a value was marked overdefined in OldSucc, and is here too...
    auto OI = BlockCache.find_as(ToUpdate);
    if (OI == BlockCache.end() || OI->second->OverDefined.empty())
    auto &ValueSet = OI->second->OverDefined;

    bool changed = false;
    for (Value *V : ValsToClear) {
      if (!ValueSet.erase(V))

      // If we removed anything, then we potentially need to update
      // blocks successors too.

    if (!changed) continue;

    // Propagate the flush to all successors of the updated block.
    worklist.insert(worklist.end(), succ_begin(ToUpdate), succ_end(ToUpdate));
/// An assembly annotator class to print LazyValueCache information in
/// IR comments.
// NOTE(review): members (the DominatorTree field, access specifiers, and the
// class terminator) appear elided from this excerpt; comments only added.
class LazyValueInfoImpl;
class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
  // The analysis whose results are printed alongside the IR.
  LazyValueInfoImpl *LVIImpl;
  // While analyzing which blocks we can solve values for, we need the dominator
  // information.

  LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
      : LVIImpl(L), DT(DTree) {}

  // Emit an annotation comment at the start of each basic block.
  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override;

  // Emit an annotation comment after each instruction.
  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override;
// The actual implementation of the lazy analysis and update. Note that the
// inheritance from LazyValueInfoCache is intended to be temporary while
// splitting the code and then transitioning to a has-a relationship.
// NOTE(review): this excerpt appears elided — access specifiers, some
// declaration continuation lines, the GuardDecl member, and the class
// terminator are missing from view. Code reproduced as-is; comments added.
class LazyValueInfoImpl {
  /// Cached results from previous queries
  LazyValueInfoCache TheCache;

  /// This stack holds the state of the value solver during a query.
  /// It basically emulates the callstack of the naive
  /// recursive value lookup process.
  SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack;

  /// Keeps track of which block-value pairs are in BlockValueStack.
  DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;

  /// Push BV onto BlockValueStack unless it's already in there.
  /// Returns true on success.
  bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
    if (!BlockValueSet.insert(BV).second)
      return false; // It's already in the stack.

    LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
                      << BV.first->getName() << "\n");
    BlockValueStack.push_back(BV);
    // NOTE(review): `return true;` and the closing brace appear elided here.

  AssumptionCache *AC;  ///< A pointer to the cache of @llvm.assume calls.
  const DataLayout &DL; ///< A mandatory DataLayout

  /// Declaration of the llvm.experimental.guard() intrinsic,
  /// if it exists in the module.

  // Solve (or fetch from cache) the lattice value of Val at the end of BB.
  Optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB);
  // Lattice value of V constrained to the edge F->T.
  Optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
                                             BasicBlock *T, Instruction *CxtI = nullptr);

  // These methods process one work item and may add more. A false value
  // returned means that the work item was not completely processed and must
  // be revisited after going through the new items.
  bool solveBlockValue(Value *Val, BasicBlock *BB);
  Optional<ValueLatticeElement> solveBlockValueImpl(Value *Val, BasicBlock *BB);
  Optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
  Optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
  Optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
  Optional<ConstantRange> getRangeForOperand(unsigned Op, Instruction *I,
  Optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
      Instruction *I, BasicBlock *BB,
      std::function<ConstantRange(const ConstantRange &,
                                  const ConstantRange &)> OpFn);
  Optional<ValueLatticeElement> solveBlockValueBinaryOp(BinaryOperator *BBI,
  Optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
  Optional<ValueLatticeElement> solveBlockValueOverflowIntrinsic(
      WithOverflowInst *WO, BasicBlock *BB);
  Optional<ValueLatticeElement> solveBlockValueSaturatingIntrinsic(
      SaturatingInst *SI, BasicBlock *BB);
  Optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
  Optional<ValueLatticeElement> solveBlockValueExtractValue(
      ExtractValueInst *EVI, BasicBlock *BB);
  // Tighten BBLV using @llvm.assume / guard conditions valid at Val's use.
  void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
                                                     ValueLatticeElement &BBLV,

  /// This is the query interface to determine the lattice
  /// value for the specified Value* at the end of the specified block.
  ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
                                      Instruction *CxtI = nullptr);

  /// This is the query interface to determine the lattice
  /// value for the specified Value* at the specified instruction (generally
  /// from an assume intrinsic).
  ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);

  /// This is the query interface to determine the lattice
  /// value for the specified Value* that is true on the specified edge.
  ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
                                     Instruction *CxtI = nullptr);

  /// Complete flush all previously computed values

  /// Printing the LazyValueInfo Analysis.
  void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
    LazyValueInfoAnnotatedWriter Writer(this, DTree);
    F.print(OS, &Writer);

  /// This is part of the update interface to inform the cache
  /// that a block has been deleted.
  void eraseBlock(BasicBlock *BB) {
    TheCache.eraseBlock(BB);

  /// This is the update interface to inform the cache that an edge from
  /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
  void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);

  LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
      : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
457 } // end anonymous namespace
// Drain BlockValueStack, solving each (block, value) work item. Items that
// push new dependencies are left on the stack and revisited later; solved
// items are popped. Gives up (marking the original queries overdefined)
// after MaxProcessedPerValue items.
// NOTE(review): this excerpt appears elided — an LLVM_DEBUG( opener,
// `processedCount++`, several closing braces and a `return` after the
// give-up path are missing from view. Code reproduced as-is; comments added.
void LazyValueInfoImpl::solve() {
  // Remember the queries that started this solve so we can mark exactly
  // those overdefined if we give up.
  SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
      BlockValueStack.begin(), BlockValueStack.end());

  unsigned processedCount = 0;
  while (!BlockValueStack.empty()) {
    // Abort if we have to process too many values to get a result for this one.
    // Because of the design of the overdefined cache currently being per-block
    // to avoid naming-related issues (IE it wants to try to give different
    // results for the same name in different blocks), overdefined results don't
    // get cached globally, which in turn means we will often try to rediscover
    // the same overdefined result again and again. Once something like
    // PredicateInfo is used in LVI or CVP, we should be able to make the
    // overdefined cache global, and remove this throttle.
    if (processedCount > MaxProcessedPerValue) {
      dbgs() << "Giving up on stack because we are getting too deep\n");
      // Fill in the original values
      while (!StartingStack.empty()) {
        std::pair<BasicBlock *, Value *> &e = StartingStack.back();
        TheCache.insertResult(e.second, e.first,
                              ValueLatticeElement::getOverdefined());
        StartingStack.pop_back();
      BlockValueSet.clear();
      BlockValueStack.clear();
    std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
    assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");

    if (solveBlockValue(e.second, e.first)) {
      // The work item was completely processed.
      assert(BlockValueStack.back() == e && "Nothing should have been pushed!");
      Optional<ValueLatticeElement> BBLV =
          TheCache.getCachedValueInfo(e.second, e.first);
      assert(BBLV && "Result should be in cache!");
      dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "

      BlockValueStack.pop_back();
      BlockValueSet.erase(e);
      // More work needs to be done before revisiting.
      assert(BlockValueStack.back() != e && "Stack should have been pushed!");
513 Optional<ValueLatticeElement> LazyValueInfoImpl::getBlockValue(Value *Val,
515 // If already a constant, there is nothing to compute.
516 if (Constant *VC = dyn_cast<Constant>(Val))
517 return ValueLatticeElement::get(VC);
519 if (Optional<ValueLatticeElement> OptLatticeVal =
520 TheCache.getCachedValueInfo(Val, BB))
521 return OptLatticeVal;
523 // We have hit a cycle, assume overdefined.
524 if (!pushBlockValue({ BB, Val }))
525 return ValueLatticeElement::getOverdefined();
527 // Yet to be resolved.
531 static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
532 switch (BBI->getOpcode()) {
534 case Instruction::Load:
535 case Instruction::Call:
536 case Instruction::Invoke:
537 if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
538 if (isa<IntegerType>(BBI->getType())) {
539 return ValueLatticeElement::getRange(
540 getConstantRangeFromMetadata(*Ranges));
544 // Nothing known - will be intersected with other facts
545 return ValueLatticeElement::getOverdefined();
548 bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
549 assert(!isa<Constant>(Val) && "Value should not be constant");
550 assert(!TheCache.getCachedValueInfo(Val, BB) &&
551 "Value should not be in cache");
553 // Hold off inserting this value into the Cache in case we have to return
554 // false and come back later.
555 Optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
557 // Work pushed, will revisit
560 TheCache.insertResult(Val, BB, *Res);
564 Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueImpl(
565 Value *Val, BasicBlock *BB) {
566 Instruction *BBI = dyn_cast<Instruction>(Val);
567 if (!BBI || BBI->getParent() != BB)
568 return solveBlockValueNonLocal(Val, BB);
570 if (PHINode *PN = dyn_cast<PHINode>(BBI))
571 return solveBlockValuePHINode(PN, BB);
573 if (auto *SI = dyn_cast<SelectInst>(BBI))
574 return solveBlockValueSelect(SI, BB);
576 // If this value is a nonnull pointer, record it's range and bailout. Note
577 // that for all other pointer typed values, we terminate the search at the
578 // definition. We could easily extend this to look through geps, bitcasts,
579 // and the like to prove non-nullness, but it's not clear that's worth it
580 // compile time wise. The context-insensitive value walk done inside
581 // isKnownNonZero gets most of the profitable cases at much less expense.
582 // This does mean that we have a sensitivity to where the defining
583 // instruction is placed, even if it could legally be hoisted much higher.
584 // That is unfortunate.
585 PointerType *PT = dyn_cast<PointerType>(BBI->getType());
586 if (PT && isKnownNonZero(BBI, DL))
587 return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
589 if (BBI->getType()->isIntegerTy()) {
590 if (auto *CI = dyn_cast<CastInst>(BBI))
591 return solveBlockValueCast(CI, BB);
593 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI))
594 return solveBlockValueBinaryOp(BO, BB);
596 if (auto *EVI = dyn_cast<ExtractValueInst>(BBI))
597 return solveBlockValueExtractValue(EVI, BB);
599 if (auto *II = dyn_cast<IntrinsicInst>(BBI))
600 return solveBlockValueIntrinsic(II, BB);
603 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
604 << "' - unknown inst def found.\n");
605 return getFromRangeMetadata(BBI);
608 static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
609 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
610 return L->getPointerAddressSpace() == 0 &&
611 GetUnderlyingObject(L->getPointerOperand(),
612 L->getModule()->getDataLayout()) == Ptr;
614 if (StoreInst *S = dyn_cast<StoreInst>(I)) {
615 return S->getPointerAddressSpace() == 0 &&
616 GetUnderlyingObject(S->getPointerOperand(),
617 S->getModule()->getDataLayout()) == Ptr;
619 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
620 if (MI->isVolatile()) return false;
622 // FIXME: check whether it has a valuerange that excludes zero?
623 ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
624 if (!Len || Len->isZero()) return false;
626 if (MI->getDestAddressSpace() == 0)
627 if (GetUnderlyingObject(MI->getRawDest(),
628 MI->getModule()->getDataLayout()) == Ptr)
630 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
631 if (MTI->getSourceAddressSpace() == 0)
632 if (GetUnderlyingObject(MTI->getRawSource(),
633 MTI->getModule()->getDataLayout()) == Ptr)
639 /// Return true if the allocation associated with Val is ever dereferenced
640 /// within the given basic block. This establishes the fact Val is not null,
641 /// but does not imply that the memory at Val is dereferenceable. (Val may
642 /// point off the end of the dereferenceable part of the object.)
643 static bool isObjectDereferencedInBlock(Value *Val, BasicBlock *BB) {
644 assert(Val->getType()->isPointerTy());
646 const DataLayout &DL = BB->getModule()->getDataLayout();
647 Value *UnderlyingVal = GetUnderlyingObject(Val, DL);
648 // If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
649 // inside InstructionDereferencesPointer either.
650 if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, DL, 1))
651 for (Instruction &I : *BB)
652 if (InstructionDereferencesPointer(&I, UnderlyingVal))
// Solve the value of Val (defined outside BB) by merging the edge values
// from all of BB's predecessors.
// NOTE(review): this excerpt appears elided — an `if (PTy && ...)` opener,
// `return None;` after unexplored predecessors, the final `return Result;`,
// and closing braces are missing from view. Code reproduced as-is.
Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueNonLocal(
    Value *Val, BasicBlock *BB) {
  ValueLatticeElement Result;  // Start Undefined.

  // If this is the entry block, we must be asking about an argument. The
  // value is overdefined.
  if (BB == &BB->getParent()->getEntryBlock()) {
    assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
    // Before giving up, see if we can prove the pointer non-null local to
    // this particular block.
    PointerType *PTy = dyn_cast<PointerType>(Val->getType());
        (isKnownNonZero(Val, DL) ||
         (isObjectDereferencedInBlock(Val, BB) &&
          !NullPointerIsDefined(BB->getParent(), PTy->getAddressSpace()))))
      return ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
    return ValueLatticeElement::getOverdefined();

  // Loop over all of our predecessors, merging what we know from them into
  // result. If we encounter an unexplored predecessor, we eagerly explore it
  // in a depth first manner. In practice, this has the effect of discovering
  // paths we can't analyze eagerly without spending compile times analyzing
  // other paths. This heuristic benefits from the fact that predecessors are
  // frequently arranged such that dominating ones come first and we quickly
  // find a path to function entry. TODO: We should consider explicitly
  // canonicalizing to make this true rather than relying on this happy
  // accident.
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
    Optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, *PI, BB);
      // Explore that input, then return here

    Result.mergeIn(*EdgeResult);

    // If we hit overdefined, exit early. The BlockVals entry is already set
    // to overdefined.
    if (Result.isOverdefined()) {
      LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                        << "' - overdefined because of pred (non local).\n");
      // Before giving up, see if we can prove the pointer non-null local to
      // this particular block.
      PointerType *PTy = dyn_cast<PointerType>(Val->getType());
      if (PTy && isObjectDereferencedInBlock(Val, BB) &&
          !NullPointerIsDefined(BB->getParent(), PTy->getAddressSpace())) {
        Result = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));

  // Return the merged value, which is more precise than 'overdefined'.
  assert(!Result.isOverdefined());
// Solve a PHI node by merging the edge values of all incoming (value, block)
// pairs, exiting early on overdefined.
// NOTE(review): this excerpt appears elided — `return None;` for unexplored
// inputs, the early-exit return, the final `return Result;` and closing
// braces are missing from view. Code reproduced as-is.
Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValuePHINode(
    PHINode *PN, BasicBlock *BB) {
  ValueLatticeElement Result;  // Start Undefined.

  // Loop over all of our predecessors, merging what we know from them into
  // result. See the comment about the chosen traversal order in
  // solveBlockValueNonLocal; the same reasoning applies here.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *PhiBB = PN->getIncomingBlock(i);
    Value *PhiVal = PN->getIncomingValue(i);
    // Note that we can provide PN as the context value to getEdgeValue, even
    // though the results will be cached, because PN is the value being used as
    // the cache key in the caller.
    Optional<ValueLatticeElement> EdgeResult =
        getEdgeValue(PhiVal, PhiBB, BB, PN);
      // Explore that input, then return here

    Result.mergeIn(*EdgeResult);

    // If we hit overdefined, exit early. The BlockVals entry is already set
    // to overdefined.
    if (Result.isOverdefined()) {
      LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                        << "' - overdefined because of pred (local).\n");

  // Return the merged value, which is more precise than 'overdefined'.
  assert(!Result.isOverdefined() && "Possible PHI in entry block?");
// Forward declaration: compute the lattice value implied for Val by knowing
// that the condition Cond evaluates to isTrueDest.
static ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
                                                 bool isTrueDest = true);
// If we can determine a constraint on the value given conditions assumed by
// the program, intersect those constraints with BBLV
// NOTE(review): this excerpt appears elided — a null-BBI early return,
// `continue`/`return` statements after the guard checks, loop bounds, and
// closing braces are missing from view. Code reproduced as-is.
void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
    Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
  // Default the context instruction to Val's own definition.
  BBI = BBI ? BBI : dyn_cast<Instruction>(Val);

  BasicBlock *BB = BBI->getParent();
  // Intersect with facts implied by every applicable @llvm.assume on Val.
  for (auto &AssumeVH : AC->assumptionsFor(Val)) {

    // Only check assumes in the block of the context instruction. Other
    // assumes will have already been taken into account when the value was
    // propagated from predecessor blocks.
    auto *I = cast<CallInst>(AssumeVH);
    if (I->getParent() != BB || !isValidAssumeForContext(I, BBI))

    BBLV = intersect(BBLV, getValueFromCondition(Val, I->getArgOperand(0)));

  // If guards are not used in the module, don't spend time looking for them
  if (!GuardDecl || GuardDecl->use_empty())

  if (BBI->getIterator() == BB->begin())
  // Walk backwards from BBI, intersecting with each guard condition seen.
  for (Instruction &I : make_range(std::next(BBI->getIterator().getReverse()),
    Value *Cond = nullptr;
    if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
      BBLV = intersect(BBLV, getValueFromCondition(Val, Cond));
// Solve a select: recurse on both arms, recognize min/max/abs/nabs patterns
// on constant ranges, refine each arm by the select condition, handle clamp
// idioms, and finally merge the two arm values.
// NOTE(review): this excerpt appears elided — `return None;` checks after
// each getBlockValue, a `default:` arm, `}();` lambda terminator, `break`
// statements, the switch-on-Pred opener, the final `return Result;` and many
// closing braces are missing from view. Code reproduced as-is.
Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueSelect(
    SelectInst *SI, BasicBlock *BB) {
  // Recurse on our inputs if needed
  Optional<ValueLatticeElement> OptTrueVal =
      getBlockValue(SI->getTrueValue(), BB);
  ValueLatticeElement &TrueVal = *OptTrueVal;

  // If we hit overdefined, don't ask more queries. We want to avoid poisoning
  // extra slots in the table if we can.
  if (TrueVal.isOverdefined())
    return ValueLatticeElement::getOverdefined();

  Optional<ValueLatticeElement> OptFalseVal =
      getBlockValue(SI->getFalseValue(), BB);
  ValueLatticeElement &FalseVal = *OptFalseVal;

  // If we hit overdefined, don't ask more queries. We want to avoid poisoning
  // extra slots in the table if we can.
  if (FalseVal.isOverdefined())
    return ValueLatticeElement::getOverdefined();

  if (TrueVal.isConstantRange() && FalseVal.isConstantRange()) {
    const ConstantRange &TrueCR = TrueVal.getConstantRange();
    const ConstantRange &FalseCR = FalseVal.getConstantRange();
    Value *LHS = nullptr;
    Value *RHS = nullptr;
    SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
    // Is this a min specifically of our two inputs? (Avoid the risk of
    // ValueTracking getting smarter looking back past our immediate inputs.)
    if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
        LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) {
      // Combine the arm ranges with the range operation matching the flavor.
      ConstantRange ResultCR = [&]() {
        switch (SPR.Flavor) {
          llvm_unreachable("unexpected minmax type!");
        case SPF_SMIN:                   /// Signed minimum
          return TrueCR.smin(FalseCR);
        case SPF_UMIN:                   /// Unsigned minimum
          return TrueCR.umin(FalseCR);
        case SPF_SMAX:                   /// Signed maximum
          return TrueCR.smax(FalseCR);
        case SPF_UMAX:                   /// Unsigned maximum
          return TrueCR.umax(FalseCR);
      return ValueLatticeElement::getRange(
          ResultCR, TrueVal.isConstantRangeIncludingUndef() |
                        FalseVal.isConstantRangeIncludingUndef());

    if (SPR.Flavor == SPF_ABS) {
      if (LHS == SI->getTrueValue())
        return ValueLatticeElement::getRange(
            TrueCR.abs(), TrueVal.isConstantRangeIncludingUndef());
      if (LHS == SI->getFalseValue())
        return ValueLatticeElement::getRange(
            FalseCR.abs(), FalseVal.isConstantRangeIncludingUndef());

    if (SPR.Flavor == SPF_NABS) {
      ConstantRange Zero(APInt::getNullValue(TrueCR.getBitWidth()));
      if (LHS == SI->getTrueValue())
        return ValueLatticeElement::getRange(
            Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef());
      if (LHS == SI->getFalseValue())
        return ValueLatticeElement::getRange(
            Zero.sub(FalseCR.abs()), FalseVal.isConstantRangeIncludingUndef());

  // Can we constrain the facts about the true and false values by using the
  // condition itself?  This shows up with idioms like e.g. select(a > 5, a, 5).
  // TODO: We could potentially refine an overdefined true value above.
  Value *Cond = SI->getCondition();
  TrueVal = intersect(TrueVal,
                      getValueFromCondition(SI->getTrueValue(), Cond, true));
  FalseVal = intersect(FalseVal,
                       getValueFromCondition(SI->getFalseValue(), Cond, false));

  // Handle clamp idioms such as:
  //   %24 = constantrange<0, 17>
  //   %39 = icmp eq i32 %24, 0
  //   %40 = add i32 %24, -1
  //   %siv.next = select i1 %39, i32 16, i32 %40
  //   %siv.next = constantrange<0, 17> not <-1, 17>
  // In general, this can handle any clamp idiom which tests the edge
  // condition via an equality or inequality.
  if (auto *ICI = dyn_cast<ICmpInst>(Cond)) {
    ICmpInst::Predicate Pred = ICI->getPredicate();
    Value *A = ICI->getOperand(0);
    if (ConstantInt *CIBase = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
      auto addConstants = [](ConstantInt *A, ConstantInt *B) {
        assert(A->getType() == B->getType());
        return ConstantInt::get(A->getType(), A->getValue() + B->getValue());

      // See if either input is A + C2, subject to the constraint from the
      // condition that A != C when that input is used.  We can assume that
      // that input doesn't include C + C2.
      ConstantInt *CIAdded;
      case ICmpInst::ICMP_EQ:
        if (match(SI->getFalseValue(), m_Add(m_Specific(A),
                                             m_ConstantInt(CIAdded)))) {
          auto ResNot = addConstants(CIBase, CIAdded);
          FalseVal = intersect(FalseVal,
                               ValueLatticeElement::getNot(ResNot));
      case ICmpInst::ICMP_NE:
        if (match(SI->getTrueValue(), m_Add(m_Specific(A),
                                            m_ConstantInt(CIAdded)))) {
          auto ResNot = addConstants(CIBase, CIAdded);
          TrueVal = intersect(TrueVal,
                              ValueLatticeElement::getNot(ResNot));

  ValueLatticeElement Result = TrueVal;
  Result.mergeIn(FalseVal);
// Look up the lattice value of I's operand Op in block BB and return it as a
// ConstantRange.  Falls back to the full range of the operand's bit width
// when the lattice value is not a constant range; an empty Optional signals
// that the operand's block value still has to be solved first.
922 Optional<ConstantRange> LazyValueInfoImpl::getRangeForOperand(unsigned Op,
925 Optional<ValueLatticeElement> OptVal = getBlockValue(I->getOperand(Op), BB);
929 ValueLatticeElement &Val = *OptVal;
// Refine the block value with facts from dominating assumes/guards at I.
930 intersectAssumeOrGuardBlockValueConstantRange(I->getOperand(Op), Val, I);
931 if (Val.isConstantRange())
932 return Val.getConstantRange();
// Not a range (e.g. overdefined): conservatively use the full range of the
// operand's type so callers can still run their transfer function.
934 const unsigned OperandBitWidth =
935 DL.getTypeSizeInBits(I->getOperand(Op)->getType());
936 return ConstantRange::getFull(OperandBitWidth);
// Compute the lattice value of cast CI in BB by applying
// ConstantRange::castOp to the range of its operand.  Only trunc/sext/zext/
// bitcast are handled; everything else is overdefined.
939 Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueCast(
940 CastInst *CI, BasicBlock *BB) {
941 // Without knowing how wide the input is, we can't analyze it in any useful
943 if (!CI->getOperand(0)->getType()->isSized())
944 return ValueLatticeElement::getOverdefined();
946 // Filter out casts we don't know how to reason about before attempting to
947 // recurse on our operand. This can cut a long search short if we know we're
948 // not going to be able to get any useful information anways.
949 switch (CI->getOpcode()) {
950 case Instruction::Trunc:
951 case Instruction::SExt:
952 case Instruction::ZExt:
953 case Instruction::BitCast:
956 // Unhandled instructions are overdefined.
957 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
958 << "' - overdefined (unknown cast).\n");
959 return ValueLatticeElement::getOverdefined();
962 // Figure out the range of the LHS. If that fails, we still apply the
963 // transfer rule on the full set since we may be able to locally infer
964 // interesting facts.
965 Optional<ConstantRange> LHSRes = getRangeForOperand(0, CI, BB);
966 if (!LHSRes.hasValue())
967 // More work to do before applying this transfer rule.
969 const ConstantRange &LHSRange = LHSRes.getValue();
971 const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
973 // NOTE: We're currently limited by the set of operations that ConstantRange
974 // can evaluate symbolically. Enhancing that set will allows us to analyze
// castOp performs the symbolic cast of the operand range into the result's
// bit width.
976 return ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(),
// Shared evaluator for two-operand instructions: fetch the ConstantRanges of
// operands 0 and 1 and combine them with the caller-supplied transfer
// function OpFn.  An empty Optional propagates "operand not solved yet".
980 Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
981 Instruction *I, BasicBlock *BB,
982 std::function<ConstantRange(const ConstantRange &,
983 const ConstantRange &)> OpFn) {
984 // Figure out the ranges of the operands. If that fails, use a
985 // conservative range, but apply the transfer rule anyways. This
986 // lets us pick up facts from expressions like "and i32 (call i32
988 Optional<ConstantRange> LHSRes = getRangeForOperand(0, I, BB);
989 Optional<ConstantRange> RHSRes = getRangeForOperand(1, I, BB);
990 if (!LHSRes.hasValue() || !RHSRes.hasValue())
991 // More work to do before applying this transfer rule.
994 const ConstantRange &LHSRange = LHSRes.getValue();
995 const ConstantRange &RHSRange = RHSRes.getValue();
996 return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
// Compute the lattice value of a binary operator by delegating to
// ConstantRange's transfer functions.  Xor is rejected up front, and
// nuw/nsw flags are forwarded so overflowingBinaryOp can tighten the result.
999 Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueBinaryOp(
1000 BinaryOperator *BO, BasicBlock *BB) {
1001 assert(BO->getOperand(0)->getType()->isSized() &&
1002 "all operands to binary operators are sized");
1003 if (BO->getOpcode() == Instruction::Xor) {
1004 // Xor is the only operation not supported by ConstantRange::binaryOp().
1005 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1006 << "' - overdefined (unknown binary operator).\n");
1007 return ValueLatticeElement::getOverdefined();
// Overflowing ops (add/sub/mul/shl) carry nuw/nsw flags; encode them as the
// NoWrapKind bitmask that overflowingBinaryOp understands.
1010 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) {
1011 unsigned NoWrapKind = 0;
1012 if (OBO->hasNoUnsignedWrap())
1013 NoWrapKind |= OverflowingBinaryOperator::NoUnsignedWrap;
1014 if (OBO->hasNoSignedWrap())
1015 NoWrapKind |= OverflowingBinaryOperator::NoSignedWrap;
1017 return solveBlockValueBinaryOpImpl(
1019 [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
1020 return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind);
// All remaining supported opcodes go through the generic binaryOp.
1024 return solveBlockValueBinaryOpImpl(
1025 BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
1026 return CR1.binaryOp(BO->getOpcode(), CR2);
// Compute the range of the arithmetic result (element 0) of an
// llvm.*.with.overflow intrinsic by running the underlying binary op over
// the operand ranges.  The overflow bit itself is not modeled here.
1030 Optional<ValueLatticeElement>
1031 LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
1033 return solveBlockValueBinaryOpImpl(
1034 WO, BB, [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
1035 return CR1.binaryOp(WO->getBinaryOp(), CR2);
// Dispatch the four saturating add/sub intrinsics to the matching
// ConstantRange method (uadd_sat/usub_sat/sadd_sat/ssub_sat).
1039 Optional<ValueLatticeElement>
1040 LazyValueInfoImpl::solveBlockValueSaturatingIntrinsic(SaturatingInst *SI,
1042 switch (SI->getIntrinsicID()) {
1043 case Intrinsic::uadd_sat:
1044 return solveBlockValueBinaryOpImpl(
1045 SI, BB, [](const ConstantRange &CR1, const ConstantRange &CR2) {
1046 return CR1.uadd_sat(CR2);
1048 case Intrinsic::usub_sat:
1049 return solveBlockValueBinaryOpImpl(
1050 SI, BB, [](const ConstantRange &CR1, const ConstantRange &CR2) {
1051 return CR1.usub_sat(CR2);
1053 case Intrinsic::sadd_sat:
1054 return solveBlockValueBinaryOpImpl(
1055 SI, BB, [](const ConstantRange &CR1, const ConstantRange &CR2) {
1056 return CR1.sadd_sat(CR2);
1058 case Intrinsic::ssub_sat:
1059 return solveBlockValueBinaryOpImpl(
1060 SI, BB, [](const ConstantRange &CR1, const ConstantRange &CR2) {
1061 return CR1.ssub_sat(CR2);
// A SaturatingInst is by construction one of the four cases above.
1064 llvm_unreachable("All llvm.sat intrinsic are handled.");
// Entry point for intrinsic calls: saturating intrinsics are the only family
// handled here; anything else is conservatively overdefined.
1068 Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueIntrinsic(
1069 IntrinsicInst *II, BasicBlock *BB) {
1070 if (auto *SI = dyn_cast<SaturatingInst>(II))
1071 return solveBlockValueSaturatingIntrinsic(SI, BB);
1073 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1074 << "' - overdefined (unknown intrinsic).\n");
1075 return ValueLatticeElement::getOverdefined();
// Compute the lattice value of an extractvalue.  The interesting case is
// extracting element 0 (the arithmetic result) of a with.overflow intrinsic;
// otherwise try generic simplification, then give up.
1078 Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueExtractValue(
1079 ExtractValueInst *EVI, BasicBlock *BB) {
1080 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1081 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
1082 return solveBlockValueOverflowIntrinsic(WO, BB);
1084 // Handle extractvalue of insertvalue to allow further simplification
1085 // based on replaced with.overflow intrinsics.
1086 if (Value *V = SimplifyExtractValueInst(
1087 EVI->getAggregateOperand(), EVI->getIndices(),
1088 EVI->getModule()->getDataLayout()))
1089 return getBlockValue(V, BB);
1091 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1092 << "' - overdefined (unknown extractvalue).\n");
1093 return ValueLatticeElement::getOverdefined();
// Match the comparison operand LHS against forms from which a constraint on
// Val can still be derived.  On a "Val + C" match, Offset is pointed at C so
// the caller can shift the allowed range back by that offset.  Returns true
// when a usable form was recognized.
1096 static bool matchICmpOperand(const APInt *&Offset, Value *LHS, Value *Val,
1097 ICmpInst::Predicate Pred) {
1101 // Handle range checking idiom produced by InstCombine. We will subtract the
1102 // offset from the allowed range for RHS in this case.
1103 if (match(LHS, m_Add(m_Specific(Val), m_APInt(Offset))))
1106 // If (x | y) < C, then (x < C) && (y < C).
1107 if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) &&
1108 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
1111 // If (x & y) > C, then (x > C) && (y > C).
1112 if (match(LHS, m_c_And(m_Specific(Val), m_Value())) &&
1113 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
// Derive a lattice value for Val from an icmp that is known to evaluate to
// isTrueDest along the edge under consideration.  Handles direct equality
// with a constant, then the general range case via makeAllowedICmpRegion.
1119 static ValueLatticeElement getValueFromICmpCondition(Value *Val, ICmpInst *ICI,
1121 Value *LHS = ICI->getOperand(0);
1122 Value *RHS = ICI->getOperand(1);
1124 // Get the predicate that must hold along the considered edge.
1125 CmpInst::Predicate EdgePred =
1126 isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();
1128 if (isa<Constant>(RHS)) {
1129 if (ICI->isEquality() && LHS == Val) {
1130 if (EdgePred == ICmpInst::ICMP_EQ)
1131 return ValueLatticeElement::get(cast<Constant>(RHS));
// "Val != undef" carries no information, so only form a NotConstant
// element for a non-undef RHS.
1132 else if (!isa<UndefValue>(RHS))
1133 return ValueLatticeElement::getNot(cast<Constant>(RHS));
1137 if (!Val->getType()->isIntegerTy())
1138 return ValueLatticeElement::getOverdefined();
// Try to match Val (possibly offset) on the LHS; if that fails, swap the
// operands and the predicate and try again.
1140 const APInt *Offset = nullptr;
1141 if (!matchICmpOperand(Offset, LHS, Val, EdgePred)) {
1142 std::swap(LHS, RHS);
1143 EdgePred = CmpInst::getSwappedPredicate(EdgePred);
1144 if (!matchICmpOperand(Offset, LHS, Val, EdgePred))
1145 return ValueLatticeElement::getOverdefined();
1148 // Calculate the range of values that are allowed by the comparison.
1149 ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
1150 /*isFullSet=*/true);
1151 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
1152 RHSRange = ConstantRange(CI->getValue());
// A non-constant RHS may still carry !range metadata we can use.
1153 else if (Instruction *I = dyn_cast<Instruction>(RHS))
1154 if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
1155 RHSRange = getConstantRangeFromMetadata(*Ranges);
1157 // If we're interested in the false dest, invert the condition
1158 ConstantRange TrueValues =
1159 ConstantRange::makeAllowedICmpRegion(EdgePred, RHSRange);
1161 if (Offset) // Apply the offset from above.
1162 TrueValues = TrueValues.subtract(*Offset);
1164 return ValueLatticeElement::getRange(std::move(TrueValues));
1167 // Handle conditions of the form
1168 // extractvalue(op.with.overflow(%x, C), 1).
1169 static ValueLatticeElement getValueFromOverflowCondition(
1170 Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1171 // TODO: This only works with a constant RHS for now. We could also compute
1172 // the range of the RHS, but this doesn't fit into the current structure of
1173 // the edge value calculation.
1175 if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
1176 return ValueLatticeElement::getOverdefined();
1178 // Calculate the possible values of %x for which no overflow occurs.
1179 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
1180 WO->getBinaryOp(), *C, WO->getNoWrapKind());
1182 // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
1183 // constrained to it's inverse (all values that might cause overflow).
1185 NWR = NWR.inverse();
1186 return ValueLatticeElement::getRange(NWR);
// Forward declaration for the mutually recursive, memoized condition walk
// defined below.
1189 static ValueLatticeElement
1190 getValueFromCondition(Value *Val, Value *Cond, bool isTrueDest,
1191 SmallDenseMap<Value*, ValueLatticeElement> &Visited);
// Core logic for deriving a lattice value of Val from a boolean condition
// known to be isTrueDest: icmp, overflow-bit extracts, and and/or trees.
1193 static ValueLatticeElement
1194 getValueFromConditionImpl(Value *Val, Value *Cond, bool isTrueDest,
1195 SmallDenseMap<Value*, ValueLatticeElement> &Visited) {
1196 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
1197 return getValueFromICmpCondition(Val, ICI, isTrueDest);
// Index 1 of a with.overflow result is the overflow flag.
1199 if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
1200 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1201 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
1202 return getValueFromOverflowCondition(Val, WO, isTrueDest);
1204 // Handle conditions in the form of (cond1 && cond2), we know that on the
1205 // true dest path both of the conditions hold. Similarly for conditions of
1206 // the form (cond1 || cond2), we know that on the false dest path neither
1208 BinaryOperator *BO = dyn_cast<BinaryOperator>(Cond);
1209 if (!BO || (isTrueDest && BO->getOpcode() != BinaryOperator::And) ||
1210 (!isTrueDest && BO->getOpcode() != BinaryOperator::Or))
1211 return ValueLatticeElement::getOverdefined();
1213 // Prevent infinite recursion if Cond references itself as in this example:
1214 // Cond: "%tmp4 = and i1 %tmp4, undef"
1215 // BL: "%tmp4 = and i1 %tmp4, undef"
1217 Value *BL = BO->getOperand(0);
1218 Value *BR = BO->getOperand(1);
1219 if (BL == Cond || BR == Cond)
1220 return ValueLatticeElement::getOverdefined();
// Both operands must hold (true dest) / both must fail (false dest), so the
// derived facts can be intersected.
1222 return intersect(getValueFromCondition(Val, BL, isTrueDest, Visited),
1223 getValueFromCondition(Val, BR, isTrueDest, Visited));
// Memoized wrapper around getValueFromConditionImpl: consult Visited first,
// otherwise compute and record the result keyed by the condition value.
1226 static ValueLatticeElement
1227 getValueFromCondition(Value *Val, Value *Cond, bool isTrueDest,
1228 SmallDenseMap<Value*, ValueLatticeElement> &Visited) {
1229 auto I = Visited.find(Cond);
1230 if (I != Visited.end())
1233 auto Result = getValueFromConditionImpl(Val, Cond, isTrueDest, Visited);
1234 Visited[Cond] = Result;
// Public entry point: sets up a fresh memoization map and delegates to the
// recursive overload above.
1238 ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
1240 assert(Cond && "precondition");
1241 SmallDenseMap<Value*, ValueLatticeElement> Visited;
1242 return getValueFromCondition(Val, Cond, isTrueDest, Visited);
1245 // Return true if Usr has Op as an operand, otherwise false.
1246 static bool usesOperand(User *Usr, Value *Op) {
1247 return find(Usr->operands(), Op) != Usr->op_end();
1250 // Return true if the instruction type of Val is supported by
1251 // constantFoldUser(). Currently CastInst and BinaryOperator only. Call this
1252 // before calling constantFoldUser() to find out if it's even worth attempting
1254 static bool isOperationFoldable(User *Usr) {
1255 return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr);
1258 // Check if Usr can be simplified to an integer constant when the value of one
1259 // of its operands Op is an integer constant OpConstVal. If so, return it as an
1260 // lattice value range with a single element or otherwise return an overdefined
1262 static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
1263 const APInt &OpConstVal,
1264 const DataLayout &DL) {
1265 assert(isOperationFoldable(Usr) && "Precondition");
// Materialize the assumed operand value as a Constant of Op's type.
1266 Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
1267 // Check if Usr can be simplified to a constant.
1268 if (auto *CI = dyn_cast<CastInst>(Usr)) {
1269 assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
1270 if (auto *C = dyn_cast_or_null<ConstantInt>(
1271 SimplifyCastInst(CI->getOpcode(), OpConst,
1272 CI->getDestTy(), DL))) {
// A single-element range represents the folded constant.
1273 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1275 } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
// Substitute OpConst for whichever side(s) of the binop Op occupies.
1276 bool Op0Match = BO->getOperand(0) == Op;
1277 bool Op1Match = BO->getOperand(1) == Op;
1278 assert((Op0Match || Op1Match) &&
1279 "Operand 0 nor Operand 1 isn't a match");
1280 Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
1281 Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
1282 if (auto *C = dyn_cast_or_null<ConstantInt>(
1283 SimplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
1284 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1287 return ValueLatticeElement::getOverdefined();
1290 /// Compute the value of Val on the edge BBFrom -> BBTo. Returns false if
1291 /// Val is not constrained on the edge. Result is unspecified if return value
1293 static Optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
1296 // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
1297 // know that v != 0.
// Case 1: the edge leaves a conditional branch -- mine the branch condition.
1298 if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
1299 // If this is a conditional branch and only one successor goes to BBTo, then
1300 // we may be able to infer something from the condition.
1301 if (BI->isConditional() &&
1302 BI->getSuccessor(0) != BI->getSuccessor(1)) {
1303 bool isTrueDest = BI->getSuccessor(0) == BBTo;
1304 assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1305 "BBTo isn't a successor of BBFrom");
1306 Value *Condition = BI->getCondition();
1308 // If V is the condition of the branch itself, then we know exactly what
1310 if (Condition == Val)
1311 return ValueLatticeElement::get(ConstantInt::get(
1312 Type::getInt1Ty(Val->getContext()), isTrueDest));
1314 // If the condition of the branch is an equality comparison, we may be
1315 // able to infer the value.
1316 ValueLatticeElement Result = getValueFromCondition(Val, Condition,
1318 if (!Result.isOverdefined())
// The condition did not constrain Val directly; try to fold Val itself
// when it is an instruction computable from the condition.
1321 if (User *Usr = dyn_cast<User>(Val)) {
1322 assert(Result.isOverdefined() && "Result isn't overdefined");
1323 // Check with isOperationFoldable() first to avoid linearly iterating
1324 // over the operands unnecessarily which can be expensive for
1325 // instructions with many operands.
1326 if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
1327 const DataLayout &DL = BBTo->getModule()->getDataLayout();
1328 if (usesOperand(Usr, Condition)) {
1329 // If Val has Condition as an operand and Val can be folded into a
1330 // constant with either Condition == true or Condition == false,
1331 // propagate the constant.
1333 // ; %Val is true on the edge to %then.
1334 // %Val = and i1 %Condition, true.
1335 // br %Condition, label %then, label %else
1336 APInt ConditionVal(1, isTrueDest ? 1 : 0);
1337 Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
1339 // If one of Val's operand has an inferred value, we may be able to
1340 // infer the value of Val.
1342 // ; %Val is 94 on the edge to %then.
1343 // %Val = add i8 %Op, 1
1344 // %Condition = icmp eq i8 %Op, 93
1345 // br i1 %Condition, label %then, label %else
1346 for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
1347 Value *Op = Usr->getOperand(i);
1348 ValueLatticeElement OpLatticeVal =
1349 getValueFromCondition(Op, Condition, isTrueDest);
1350 if (Optional<APInt> OpConst = OpLatticeVal.asConstantInteger()) {
1351 Result = constantFoldUser(Usr, Op, OpConst.getValue(), DL);
1358 if (!Result.isOverdefined())
1363 // If the edge was formed by a switch on the value, then we may know exactly
// Case 2: the edge leaves a switch -- union/difference the case ranges.
1365 if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
1366 Value *Condition = SI->getCondition();
1367 if (!isa<IntegerType>(Val->getType()))
1369 bool ValUsesConditionAndMayBeFoldable = false;
1370 if (Condition != Val) {
1371 // Check if Val has Condition as an operand.
1372 if (User *Usr = dyn_cast<User>(Val))
1373 ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
1374 usesOperand(Usr, Condition);
1375 if (!ValUsesConditionAndMayBeFoldable)
1378 assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
1379 "Condition != Val nor Val doesn't use Condition");
// Start from the full range for the default destination (everything not
// excluded below), otherwise from the empty range and accumulate cases.
1381 bool DefaultCase = SI->getDefaultDest() == BBTo;
1382 unsigned BitWidth = Val->getType()->getIntegerBitWidth();
1383 ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
1385 for (auto Case : SI->cases()) {
1386 APInt CaseValue = Case.getCaseValue()->getValue();
1387 ConstantRange EdgeVal(CaseValue);
1388 if (ValUsesConditionAndMayBeFoldable) {
1389 User *Usr = cast<User>(Val);
1390 const DataLayout &DL = BBTo->getModule()->getDataLayout();
1391 ValueLatticeElement EdgeLatticeVal =
1392 constantFoldUser(Usr, Condition, CaseValue, DL);
1393 if (EdgeLatticeVal.isOverdefined())
1395 EdgeVal = EdgeLatticeVal.getConstantRange();
1398 // It is possible that the default destination is the destination of
1399 // some cases. We cannot perform difference for those cases.
1400 // We know Condition != CaseValue in BBTo. In some cases we can use
1401 // this to infer Val == f(Condition) is != f(CaseValue). For now, we
1402 // only do this when f is identity (i.e. Val == Condition), but we
1403 // should be able to do this for any injective f.
1404 if (Case.getCaseSuccessor() != BBTo && Condition == Val)
1405 EdgesVals = EdgesVals.difference(EdgeVal);
1406 } else if (Case.getCaseSuccessor() == BBTo)
1407 EdgesVals = EdgesVals.unionWith(EdgeVal);
1409 return ValueLatticeElement::getRange(std::move(EdgesVals));
1414 /// Compute the value of Val on the edge BBFrom -> BBTo or the value at
1415 /// the basic block if the edge does not constrain Val.
1416 Optional<ValueLatticeElement> LazyValueInfoImpl::getEdgeValue(
1417 Value *Val, BasicBlock *BBFrom, BasicBlock *BBTo, Instruction *CxtI) {
1418 // If already a constant, there is nothing to compute.
1419 if (Constant *VC = dyn_cast<Constant>(Val))
1420 return ValueLatticeElement::get(VC);
// Edge-local facts; when the edge says nothing, fall back to overdefined.
1422 ValueLatticeElement LocalResult = getEdgeValueLocal(Val, BBFrom, BBTo)
1423 .getValueOr(ValueLatticeElement::getOverdefined());
1424 if (hasSingleValue(LocalResult))
1425 // Can't get any more precise here
1428 Optional<ValueLatticeElement> OptInBlock = getBlockValue(Val, BBFrom);
1431 ValueLatticeElement &InBlock = *OptInBlock;
1433 // Try to intersect ranges of the BB and the constraint on the edge.
1434 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock,
1435 BBFrom->getTerminator());
1436 // We can use the context instruction (generically the ultimate instruction
1437 // the calling pass is trying to simplify) here, even though the result of
1438 // this function is generally cached when called from the solve* functions
1439 // (and that cached result might be used with queries using a different
1440 // context instruction), because when this function is called from the solve*
1441 // functions, the context instruction is not provided. When called from
1442 // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
1443 // but then the result is not cached.
1444 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI)";
1446 return intersect(LocalResult, InBlock);
// Solve (if necessary) and return the lattice value of V at the end of BB,
// refined by assumes/guards relevant at the context instruction CxtI.
1449 ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
1450 Instruction *CxtI) {
1451 LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1452 << BB->getName() << "'\n");
1454 assert(BlockValueStack.empty() && BlockValueSet.empty());
1455 Optional<ValueLatticeElement> OptResult = getBlockValue(V, BB);
// First query may leave work pending; re-query after solving.
1458 OptResult = getBlockValue(V, BB);
1459 assert(OptResult && "Value not available after solving");
1461 ValueLatticeElement Result = *OptResult;
1462 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1464 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
// Return the lattice value of V at the context instruction CxtI only, using
// range metadata and assume/guard facts but no block-level solving.
1468 ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
1469 LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
1472 if (auto *C = dyn_cast<Constant>(V))
1473 return ValueLatticeElement::get(C);
1475 ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
1476 if (auto *I = dyn_cast<Instruction>(V))
1477 Result = getFromRangeMetadata(I);
1478 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1480 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
// Solve (if necessary) and return the lattice value of V along the CFG edge
// FromBB -> ToBB, with CxtI providing extra assume/guard context.
1484 ValueLatticeElement LazyValueInfoImpl::
1485 getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1486 Instruction *CxtI) {
1487 LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1488 << FromBB->getName() << "' to '" << ToBB->getName()
1491 Optional<ValueLatticeElement> Result = getEdgeValue(V, FromBB, ToBB, CxtI);
// First query may leave work pending; re-query after solving.
1494 Result = getEdgeValue(V, FromBB, ToBB, CxtI);
1495 assert(Result && "More work to do after problem solved?");
1498 LLVM_DEBUG(dbgs() << " Result = " << *Result << "\n");
// Notify the cache that the edge PredBB -> OldSucc has been redirected to
// NewSucc (CFG threading); cache invalidation is delegated entirely.
1502 void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1503 BasicBlock *NewSucc) {
1504 TheCache.threadEdgeImpl(OldSucc, NewSucc);
1507 //===----------------------------------------------------------------------===//
1508 // LazyValueInfo Impl
1509 //===----------------------------------------------------------------------===//
1511 /// This lazily constructs the LazyValueInfoImpl.
1512 static LazyValueInfoImpl &getImpl(void *&PImpl, AssumptionCache *AC,
1515 assert(M && "getCache() called with a null Module");
1516 const DataLayout &DL = M->getDataLayout();
// Resolving the guard intrinsic up front lets the impl recognize
// llvm.experimental.guard calls cheaply.
1517 Function *GuardDecl = M->getFunction(
1518 Intrinsic::getName(Intrinsic::experimental_guard));
1519 PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
// PImpl is stored as void* in the facade; recover the concrete type here.
1521 return *static_cast<LazyValueInfoImpl*>(PImpl);
// Legacy-PM per-function entry: pull in required analyses and clear any
// stale per-function state from the shared implementation.
1524 bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
1525 Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1526 Info.TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1529 getImpl(Info.PImpl, Info.AC, F.getParent()).clear();
// This analysis modifies nothing and depends on assumption-cache and
// target-library info.
1535 void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1536 AU.setPreservesAll();
1537 AU.addRequired<AssumptionCacheTracker>();
1538 AU.addRequired<TargetLibraryInfoWrapperPass>();
// Accessor for clients of the wrapper pass.
1541 LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
// Destructor releases the lazily-created implementation, if any.
1543 LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1545 void LazyValueInfo::releaseMemory() {
1546 // If the cache was allocated, free it.
// getImpl recovers the typed pointer hidden behind the void* PImpl so it
// can be deleted properly.  NOTE(review): the null-check guard implied by
// the comment above is not visible in this listing -- confirm against the
// full file.
1548 delete &getImpl(PImpl, AC, nullptr);
// New-PM invalidation hook: stay valid only when the pass explicitly
// preserved this analysis or the whole function-analysis set.
1553 bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1554 FunctionAnalysisManager::Invalidator &Inv) {
1555 // We need to invalidate if we have either failed to preserve this analyses
1556 // result directly or if any of its dependencies have been invalidated.
1557 auto PAC = PA.getChecker<LazyValueAnalysis>();
1558 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
// Legacy-PM teardown simply forwards to the facade's releaseMemory.
1564 void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
// New-PM analysis entry: gather dependency results and hand back a facade
// that constructs its implementation lazily on first query.
1566 LazyValueInfo LazyValueAnalysis::run(Function &F,
1567 FunctionAnalysisManager &FAM) {
1568 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1569 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
1571 return LazyValueInfo(&AC, &F.getParent()->getDataLayout(), &TLI);
1574 /// Returns true if we can statically tell that this value will never be a
1575 /// "useful" constant. In practice, this means we've got something like an
1576 /// alloca or a malloc call for which a comparison against a constant can
1577 /// only be guarding dead code. Note that we are potentially giving up some
1578 /// precision in dead code (a constant result) in favour of avoiding a
1579 /// expensive search for a easily answered common query.
1580 static bool isKnownNonConstant(Value *V) {
// Look through bitcasts/GEP-zero etc. so the alloca underneath is visible.
1581 V = V->stripPointerCasts();
1582 // The return val of alloc cannot be a Constant.
1583 if (isa<AllocaInst>(V))
// Determine whether V is a known constant at the end of BB; returns null
// when no single constant can be proven.
1588 Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
1589 Instruction *CxtI) {
1590 // Bail out early if V is known not to be a Constant.
1591 if (isKnownNonConstant(V))
1594 ValueLatticeElement Result =
1595 getImpl(PImpl, AC, BB->getModule()).getValueInBlock(V, BB, CxtI);
1597 if (Result.isConstant())
1598 return Result.getConstant();
// A single-element range is also a constant answer.
1599 if (Result.isConstantRange()) {
1600 const ConstantRange &CR = Result.getConstantRange();
1601 if (const APInt *SingleVal = CR.getSingleElement())
1602 return ConstantInt::get(V->getContext(), *SingleVal);
// Return the tightest known range of the integer V at the end of BB;
// unknown lattice values become the empty range, overdefined the full one.
1607 ConstantRange LazyValueInfo::getConstantRange(Value *V, BasicBlock *BB,
1609 bool UndefAllowed) {
1610 assert(V->getType()->isIntegerTy());
1611 unsigned Width = V->getType()->getIntegerBitWidth();
1612 ValueLatticeElement Result =
1613 getImpl(PImpl, AC, BB->getModule()).getValueInBlock(V, BB, CxtI);
1614 if (Result.isUnknown())
1615 return ConstantRange::getEmpty(Width);
1616 if (Result.isConstantRange(UndefAllowed))
1617 return Result.getConstantRange(UndefAllowed);
1618 // We represent ConstantInt constants as constant ranges but other kinds
1619 // of integer constants, i.e. ConstantExpr will be tagged as constants
1620 assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
1621 "ConstantInt value must be represented as constantrange");
1622 return ConstantRange::getFull(Width);
1625 /// Determine whether the specified value is known to be a
1626 /// constant on the specified edge. Return null if not.
1627 Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1629 Instruction *CxtI) {
1630 Module *M = FromBB->getModule();
1631 ValueLatticeElement Result =
1632 getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1634 if (Result.isConstant())
1635 return Result.getConstant();
// A single-element range is also a constant answer.
1636 if (Result.isConstantRange()) {
1637 const ConstantRange &CR = Result.getConstantRange();
1638 if (const APInt *SingleVal = CR.getSingleElement())
1639 return ConstantInt::get(V->getContext(), *SingleVal);
// Return the tightest known range of V along the edge FromBB -> ToBB;
// unknown lattice values become the empty range, overdefined the full one.
1644 ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
1647 Instruction *CxtI) {
1648 unsigned Width = V->getType()->getIntegerBitWidth();
1649 Module *M = FromBB->getModule();
1650 ValueLatticeElement Result =
1651 getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1653 if (Result.isUnknown())
1654 return ConstantRange::getEmpty(Width);
1655 if (Result.isConstantRange())
1656 return Result.getConstantRange();
1657 // We represent ConstantInt constants as constant ranges but other kinds
1658 // of integer constants, i.e. ConstantExpr will be tagged as constants
1659 assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
1660 "ConstantInt value must be represented as constantrange");
1661 return ConstantRange::getFull(Width);
// Evaluate "Val <Pred> C" over a lattice element: constant-folds when Val is
// a known constant, uses ICmp regions when it is a range, and exploits
// not-constant facts for equality predicates.  Unknown otherwise.
1664 static LazyValueInfo::Tristate
1665 getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
1666 const DataLayout &DL, TargetLibraryInfo *TLI) {
1667 // If we know the value is a constant, evaluate the conditional.
1668 Constant *Res = nullptr;
1669 if (Val.isConstant()) {
1670 Res = ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL, TLI);
// NOTE(review): Res is dyn_cast'ed without an explicit null check here --
// confirm ConstantFoldCompareInstOperands cannot return null for two
// constant operands.
1671 if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
1672 return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
1673 return LazyValueInfo::Unknown;
1676 if (Val.isConstantRange()) {
1677 ConstantInt *CI = dyn_cast<ConstantInt>(C);
1678 if (!CI) return LazyValueInfo::Unknown;
1680 const ConstantRange &CR = Val.getConstantRange();
// Equality/inequality: membership in the range decides, and a singleton
// range decides the remaining direction.
1681 if (Pred == ICmpInst::ICMP_EQ) {
1682 if (!CR.contains(CI->getValue()))
1683 return LazyValueInfo::False;
1685 if (CR.isSingleElement())
1686 return LazyValueInfo::True;
1687 } else if (Pred == ICmpInst::ICMP_NE) {
1688 if (!CR.contains(CI->getValue()))
1689 return LazyValueInfo::True;
1691 if (CR.isSingleElement())
1692 return LazyValueInfo::False;
1694 // Handle more complex predicates.
1695 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
1696 (ICmpInst::Predicate)Pred, CI->getValue());
1697 if (TrueValues.contains(CR))
1698 return LazyValueInfo::True;
1699 if (TrueValues.inverse().contains(CR))
1700 return LazyValueInfo::False;
1702 return LazyValueInfo::Unknown;
1705 if (Val.isNotConstant()) {
1706 // If this is an equality comparison, we can try to fold it knowing that
1708 if (Pred == ICmpInst::ICMP_EQ) {
1709 // !C1 == C -> false iff C1 == C.
1710 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1711 Val.getNotConstant(), C, DL,
1713 if (Res->isNullValue())
1714 return LazyValueInfo::False;
1715 } else if (Pred == ICmpInst::ICMP_NE) {
1716 // !C1 != C -> true iff C1 == C.
1717 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1718 Val.getNotConstant(), C, DL,
1720 if (Res->isNullValue())
1721 return LazyValueInfo::True;
1723 return LazyValueInfo::Unknown;
1726 return LazyValueInfo::Unknown;
1729 /// Determine whether the specified value comparison with a constant is known to
1730 /// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
1731 LazyValueInfo::Tristate
1732 LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
1733 BasicBlock *FromBB, BasicBlock *ToBB,
1734 Instruction *CxtI) {
1735 Module *M = FromBB->getModule();
// Solve the edge value, then let getPredicateResult interpret it.
1736 ValueLatticeElement Result =
1737 getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1739 return getPredicateResult(Pred, C, Result, M->getDataLayout(), TLI);
1742 LazyValueInfo::Tristate
1743 LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
1744 Instruction *CxtI) {
1745 // Is or is not NonNull are common predicates being queried. If
1746 // isKnownNonZero can tell us the result of the predicate, we can
1747 // return it quickly. But this is only a fastpath, and falling
1748 // through would still be correct.
1749 Module *M = CxtI->getModule();
1750 const DataLayout &DL = M->getDataLayout();
1751 if (V->getType()->isPointerTy() && C->isNullValue() &&
1752 isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
1753 if (Pred == ICmpInst::ICMP_EQ)
1754 return LazyValueInfo::False;
1755 else if (Pred == ICmpInst::ICMP_NE)
1756 return LazyValueInfo::True;
1758 ValueLatticeElement Result = getImpl(PImpl, AC, M).getValueAt(V, CxtI);
1759 Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
1763 // Note: The following bit of code is somewhat distinct from the rest of LVI;
1764 // LVI as a whole tries to compute a lattice value which is conservatively
1765 // correct at a given location. In this case, we have a predicate which we
1766 // weren't able to prove about the merged result, and we're pushing that
1767 // predicate back along each incoming edge to see if we can prove it
1768 // separately for each input. As a motivating example, consider:
1770 // %v1 = ... ; constantrange<1, 5>
1773 // %v2 = ... ; constantrange<10, 20>
1776 // %phi = phi [%v1, %v2] ; constantrange<1,20>
1777 // %pred = icmp eq i32 %phi, 8
1778 // We can't tell from the lattice value for '%phi' that '%pred' is false
1779 // along each path, but by checking the predicate over each input separately,
1781 // We limit the search to one step backwards from the current BB and value.
1782 // We could consider extending this to search further backwards through the
1783 // CFG and/or value graph, but there are non-obvious compile time vs quality
1786 BasicBlock *BB = CxtI->getParent();
1788 // Function entry or an unreachable block. Bail to avoid confusing
1790 pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
1794 // If V is a PHI node in the same block as the context, we need to ask
1795 // questions about the predicate as applied to the incoming value along
1796 // each edge. This is useful for eliminating cases where the predicate is
1797 // known along all incoming edges.
1798 if (auto *PHI = dyn_cast<PHINode>(V))
1799 if (PHI->getParent() == BB) {
1800 Tristate Baseline = Unknown;
1801 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
1802 Value *Incoming = PHI->getIncomingValue(i);
1803 BasicBlock *PredBB = PHI->getIncomingBlock(i);
1804 // Note that PredBB may be BB itself.
1805 Tristate Result = getPredicateOnEdge(Pred, Incoming, C, PredBB, BB,
1808 // Keep going as long as we've seen a consistent known result for
1810 Baseline = (i == 0) ? Result /* First iteration */
1811 : (Baseline == Result ? Baseline : Unknown); /* All others */
1812 if (Baseline == Unknown)
1815 if (Baseline != Unknown)
1819 // For a comparison where the V is outside this block, it's possible
1820 // that we've branched on it before. Look to see if the value is known
1821 // on all incoming edges.
1822 if (!isa<Instruction>(V) ||
1823 cast<Instruction>(V)->getParent() != BB) {
1824 // For predecessor edge, determine if the comparison is true or false
1825 // on that edge. If they're all true or all false, we can conclude
1826 // the value of the comparison in this block.
1827 Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1828 if (Baseline != Unknown) {
1829 // Check that all remaining incoming values match the first one.
1830 while (++PI != PE) {
1831 Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1832 if (Ret != Baseline) break;
1834 // If we terminated early, then one of the values didn't match.
1844 void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1845 BasicBlock *NewSucc) {
1847 getImpl(PImpl, AC, PredBB->getModule())
1848 .threadEdge(PredBB, OldSucc, NewSucc);
1852 void LazyValueInfo::eraseBlock(BasicBlock *BB) {
1854 getImpl(PImpl, AC, BB->getModule()).eraseBlock(BB);
1859 void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
1861 getImpl(PImpl, AC, F.getParent()).printLVI(F, DTree, OS);
1865 // Print the LVI for the function arguments at the start of each basic block.
1866 void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
1867 const BasicBlock *BB, formatted_raw_ostream &OS) {
1868 // Find if there are latticevalues defined for arguments of the function.
1869 auto *F = BB->getParent();
1870 for (auto &Arg : F->args()) {
1871 ValueLatticeElement Result = LVIImpl->getValueInBlock(
1872 const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
1873 if (Result.isUnknown())
1875 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
1879 // This function prints the LVI analysis for the instruction I at the beginning
1880 // of various basic blocks. It relies on calculated values that are stored in
1881 // the LazyValueInfoCache, and in the absence of cached values, recalculate the
1882 // LazyValueInfo for `I`, and print that info.
1883 void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
1884 const Instruction *I, formatted_raw_ostream &OS) {
1886 auto *ParentBB = I->getParent();
1887 SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
1888 // We can generate (solve) LVI values only for blocks that are dominated by
1889 // the I's parent. However, to avoid generating LVI for all dominating blocks,
1890 // that contain redundant/uninteresting information, we print LVI for
1891 // blocks that may use this LVI information (such as immediate successor
1892 // blocks, and blocks that contain uses of `I`).
1893 auto printResult = [&](const BasicBlock *BB) {
1894 if (!BlocksContainingLVI.insert(BB).second)
1896 ValueLatticeElement Result = LVIImpl->getValueInBlock(
1897 const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
1898 OS << "; LatticeVal for: '" << *I << "' in BB: '";
1899 BB->printAsOperand(OS, false);
1900 OS << "' is: " << Result << "\n";
1903 printResult(ParentBB);
1904 // Print the LVI analysis results for the immediate successor blocks, that
1905 // are dominated by `ParentBB`.
1906 for (auto *BBSucc : successors(ParentBB))
1907 if (DT.dominates(ParentBB, BBSucc))
1908 printResult(BBSucc);
1910 // Print LVI in blocks where `I` is used.
1911 for (auto *U : I->users())
1912 if (auto *UseI = dyn_cast<Instruction>(U))
1913 if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
1914 printResult(UseI->getParent());
1919 // Printer class for LazyValueInfo results.
1920 class LazyValueInfoPrinter : public FunctionPass {
1922 static char ID; // Pass identification, replacement for typeid
1923 LazyValueInfoPrinter() : FunctionPass(ID) {
1924 initializeLazyValueInfoPrinterPass(*PassRegistry::getPassRegistry());
1927 void getAnalysisUsage(AnalysisUsage &AU) const override {
1928 AU.setPreservesAll();
1929 AU.addRequired<LazyValueInfoWrapperPass>();
1930 AU.addRequired<DominatorTreeWrapperPass>();
1933 // Get the mandatory dominator tree analysis and pass this in to the
1934 // LVIPrinter. We cannot rely on the LVI's DT, since it's optional.
1935 bool runOnFunction(Function &F) override {
1936 dbgs() << "LVI for function '" << F.getName() << "':\n";
1937 auto &LVI = getAnalysis<LazyValueInfoWrapperPass>().getLVI();
1938 auto &DTree = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1939 LVI.printLVI(F, DTree, dbgs());
1945 char LazyValueInfoPrinter::ID = 0;
1946 INITIALIZE_PASS_BEGIN(LazyValueInfoPrinter, "print-lazy-value-info",
1947 "Lazy Value Info Printer Pass", false, false)
1948 INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
1949 INITIALIZE_PASS_END(LazyValueInfoPrinter, "print-lazy-value-info",
1950 "Lazy Value Info Printer Pass", false, false)